Dataset columns (per-row string fields and their length ranges):

  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
5a0c6e1c2a8211eb59bb5b17381262149be6f6d8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "glcm_calculation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int *glcm = NULL; hipMalloc(&glcm, XSIZE*YSIZE); float *glcmNorm = NULL; hipMalloc(&glcmNorm, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int maxx = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( glcm_calculation), dim3(gridBlock),dim3(threadBlock), 0, 0, A,glcm,glcmNorm,nx,ny,maxx); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( glcm_calculation), dim3(gridBlock),dim3(threadBlock), 0, 0, A,glcm,glcmNorm,nx,ny,maxx); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( glcm_calculation), dim3(gridBlock),dim3(threadBlock), 0, 0, A,glcm,glcmNorm,nx,ny,maxx); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5a0c6e1c2a8211eb59bb5b17381262149be6f6d8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "glcm_calculation.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int *glcm = NULL; cudaMalloc(&glcm, XSIZE*YSIZE); float *glcmNorm = NULL; cudaMalloc(&glcmNorm, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int maxx = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); glcm_calculation<<<gridBlock,threadBlock>>>(A,glcm,glcmNorm,nx,ny,maxx); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { glcm_calculation<<<gridBlock,threadBlock>>>(A,glcm,glcmNorm,nx,ny,maxx); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { glcm_calculation<<<gridBlock,threadBlock>>>(A,glcm,glcmNorm,nx,ny,maxx); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
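The pair above already exhibits every rewrite hipify applied to this benchmark harness: the runtime header, hipSetDevice/hipMalloc/hipFree in place of their cuda* counterparts, and hipLaunchKernelGGL in place of the triple-chevron launch. As a compact illustration of that correspondence, here is a minimal hedged sketch using a hypothetical kernel `scale` (not part of the dataset); each HIP call is annotated with the CUDA form it replaces.

#include <hip/hip_runtime.h>               // CUDA: #include <cuda_runtime.h>

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;          // kernel body is identical in both dialects
}

int main() {
    const int n = 1 << 20;
    float* d_data = nullptr;
    hipSetDevice(0);                        // CUDA: cudaSetDevice(0);
    hipMalloc(&d_data, n * sizeof(float));  // CUDA: cudaMalloc(&d_data, n * sizeof(float));
    dim3 grid((n + 255) / 256), block(256);
    // CUDA: scale<<<grid, block>>>(d_data, 2.0f, n);
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);
    hipDeviceSynchronize();                 // CUDA: cudaDeviceSynchronize();
    hipFree(d_data);                        // CUDA: cudaFree(d_data);
    return 0;
}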
61ec9097bf777541337040e4f2b27a24c1b9281e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "reference_calc.cpp" #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion int r; int c; for(r=0; r<numRows; ++r ){ for(c=0; c<numCols; ++c){ uchar4 rgba = rgbaImage[r * numCols + c]; greyImage[r * numCols + c] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; } } //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(x, y, 1); //TODO const dim3 gridSize(x, 1, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
61ec9097bf777541337040e4f2b27a24c1b9281e.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "reference_calc.cpp" #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion int r; int c; for(r=0; r<numRows; ++r ){ for(c=0; c<numCols; ++c){ uchar4 rgba = rgbaImage[r * numCols + c]; greyImage[r * numCols + c] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; } } //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(x, y, 1); //TODO const dim3 gridSize(x, 1, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
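The row above (in both its .hip and .cu form) is a course skeleton: the kernel loops over the whole image from a single thread, and the block/grid sizes are left as TODOs with placeholder x/y. Purely to illustrate what the skeleton's comments ask for — one thread per pixel and the NTSC weights — here is a minimal hedged sketch of a completed version; it assumes the homework's utils.h (for checkCudaErrors and the CUDA runtime headers it pulls in) and is not part of the dataset.

#include "utils.h"  // assumed to provide checkCudaErrors and include the CUDA runtime

__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
                                  unsigned char* const greyImage,
                                  int numRows, int numCols) {
  const int c = blockIdx.x * blockDim.x + threadIdx.x;  // column of this thread's pixel
  const int r = blockIdx.y * blockDim.y + threadIdx.y;  // row of this thread's pixel
  if (r >= numRows || c >= numCols) return;             // guard the image border
  const uchar4 rgba = rgbaImage[r * numCols + c];
  greyImage[r * numCols + c] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}

void your_rgba_to_greyscale(const uchar4* const h_rgbaImage, uchar4* const d_rgbaImage,
                            unsigned char* const d_greyImage,
                            size_t numRows, size_t numCols) {
  const dim3 blockSize(16, 16, 1);
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y, 1);
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
}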
2d110907c9eefead3c7ed7416d1e85ff3e5359dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Parallel bitonic sort using CUDA. * Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm */ #include <stdlib.h> #include <stdio.h> #include <time.h> /* * Every thread gets exactly one value in the unsorted array. */ #define THREADS 512 #define BLOCKS 65536 #define NUM_VALS THREADS*BLOCKS float random_float() { return (float)rand()/(float)RAND_MAX; } void array_print(float *arr, int length) { int i; for (i = 0; i < length; ++i) { printf("%1.3f ", arr[i]); } printf("\n"); } void array_fill(float *arr, int length) { srand(time(NULL)); int i; for (i = 0; i < length; ++i) { arr[i] = random_float(); } } /* * GPU simple synchronization function. * See: http://eprints.cs.vt.edu/archive/00001087/01/TR_GPU_synchronization.pdf */ /* The mutex variable */ __device__ int g_mutex = 0; __device__ void __gpu_sync(int goalVal) { /* Thread ID in a block */ int tid_in_block = threadIdx.x * blockDim.y + threadIdx.y; /* Only thread 0 is used for synchronization */ if (tid_in_block == 0) { atomicAdd(&g_mutex, 1); while(g_mutex != goalVal) { /* Wait until all blocks have increased g_mutex */ } } __syncthreads(); } __device__ void bitonic_sort_step(float *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } } __global__ void bitonic_sort(float *dev_values) { int j, k, goal_value = 0; /* Major step */ for (k = 2; k <= NUM_VALS; k <<= 1) { /* Minor step */ for (j=k>>1; j>0; j=j>>1) { bitonic_sort_step(dev_values, j, k); goal_value += BLOCKS; __gpu_sync(goal_value); } } } int main(void) { float *values = (float*) malloc( NUM_VALS * sizeof(float)); array_fill(values, NUM_VALS); /* array_print(values, NUM_VALS); */ float *dev_values; size_t size = NUM_VALS * sizeof(float); hipMalloc((void**) &dev_values, size); hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice); dim3 blocks(BLOCKS,1); /* Number of blocks */ dim3 threads(THREADS,1); /* Number of threads */ hipLaunchKernelGGL(( bitonic_sort), dim3(blocks), dim3(threads), 0, 0, dev_values); hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost); hipFree(dev_values); /*array_print(values, NUM_VALS);*/ }
2d110907c9eefead3c7ed7416d1e85ff3e5359dc.cu
/* * Parallel bitonic sort using CUDA. * Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm */ #include <stdlib.h> #include <stdio.h> #include <time.h> /* * Every thread gets exactly one value in the unsorted array. */ #define THREADS 512 #define BLOCKS 65536 #define NUM_VALS THREADS*BLOCKS float random_float() { return (float)rand()/(float)RAND_MAX; } void array_print(float *arr, int length) { int i; for (i = 0; i < length; ++i) { printf("%1.3f ", arr[i]); } printf("\n"); } void array_fill(float *arr, int length) { srand(time(NULL)); int i; for (i = 0; i < length; ++i) { arr[i] = random_float(); } } /* * GPU simple synchronization function. * See: http://eprints.cs.vt.edu/archive/00001087/01/TR_GPU_synchronization.pdf */ /* The mutex variable */ __device__ int g_mutex = 0; __device__ void __gpu_sync(int goalVal) { /* Thread ID in a block */ int tid_in_block = threadIdx.x * blockDim.y + threadIdx.y; /* Only thread 0 is used for synchronization */ if (tid_in_block == 0) { atomicAdd(&g_mutex, 1); while(g_mutex != goalVal) { /* Wait until all blocks have increased g_mutex */ } } __syncthreads(); } __device__ void bitonic_sort_step(float *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { /* exchange(i,ixj); */ float temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } } __global__ void bitonic_sort(float *dev_values) { int j, k, goal_value = 0; /* Major step */ for (k = 2; k <= NUM_VALS; k <<= 1) { /* Minor step */ for (j=k>>1; j>0; j=j>>1) { bitonic_sort_step(dev_values, j, k); goal_value += BLOCKS; __gpu_sync(goal_value); } } } int main(void) { float *values = (float*) malloc( NUM_VALS * sizeof(float)); array_fill(values, NUM_VALS); /* array_print(values, NUM_VALS); */ float *dev_values; size_t size = NUM_VALS * sizeof(float); cudaMalloc((void**) &dev_values, size); cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice); dim3 blocks(BLOCKS,1); /* Number of blocks */ dim3 threads(THREADS,1); /* Number of threads */ bitonic_sort<<<blocks, threads>>>(dev_values); cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost); cudaFree(dev_values); /*array_print(values, NUM_VALS);*/ }
f6ed4313f0f0c94292653c36b6ec1cfa74d2a2ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <math.h> #include <cstdio> #include <sstream> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" namespace { // A little structure for holding details about a pixel. struct Pix { float z; // Depth of the reference point. int32_t idx; // Index of the reference point. float dist2; // Euclidean distance square to the reference point. }; __device__ inline bool operator<(const Pix& a, const Pix& b) { return a.z < b.z; } // This function checks if a pixel given by xy location pxy lies within the // point with index p and batch index n. One of the inputs is a list (q) // which contains Pixel structs with the indices of the points which intersect // with this pixel sorted by closest z distance. If the pixel pxy lies in the // point, the list (q) is updated and re-orderered in place. In addition // the auxiliary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizePointsNaiveCudaKernel and // RasterizePointsFineCudaKernel. template <typename PointQ> __device__ void CheckPixelInsidePoint( const float* points, // (P, 3) const int p_idx, int& q_size, float& q_max_z, int& q_max_idx, PointQ& q, const float* radius, const float xf, const float yf, const int K) { const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; const float p_radius = radius[p_idx]; const float radius2 = p_radius * p_radius; if (pz < 0) return; // Don't render points behind the camera const float dx = xf - px; const float dy = yf - py; const float dist2 = dx * dx + dy * dy; if (dist2 < radius2) { if (q_size < K) { // Just insert it q[q_size] = {pz, p_idx, dist2}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max q[q_max_idx] = {pz, p_idx, dist2}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsNaiveCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float* radius, const int N, const int H, const int W, const int K, int32_t* point_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists) { // (N, H, W, K) // Simple version: One thread per output pixel const int num_threads = gridDim.x * blockDim.x; const int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // Batch index const int pix_idx = i % (H * W); // Reverse ordering of the X and Y axis as the camera coordinates // assume that +Y is pointing up and +X is pointing left. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordinates of pixel. 
const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. // TODO(jcjohns) Abstract this out into a standalone data structure Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the points. const int64_t point_start_idx = cloud_to_packed_first_idx[n]; const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n]; for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) { CheckPixelInsidePoint( points, p_idx, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K); } BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { point_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist2; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsNaiveCuda( const at::Tensor& points, // (P. 3) const at::Tensor& cloud_to_packed_first_idx, // (N) const at::Tensor& num_points_per_cloud, // (N) const std::tuple<int, int> image_size, const at::Tensor& radius, const int points_per_pixel) { // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, cloud_to_packed_first_idx_t{ cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2}, num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3}; at::CheckedFrom c = "RasterizePointsNaiveCuda"; at::checkAllSameGPU( c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); TORCH_CHECK( points.ndimension() == 2 && points.size(1) == 3, "points must have dimensions (num_points, 3)"); TORCH_CHECK( num_points_per_cloud.size(0) == cloud_to_packed_first_idx.size(0), "num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx"); const int N = num_points_per_cloud.size(0); // batch size. 
const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int K = points_per_pixel; if (K > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } auto int_opts = num_points_per_cloud.options().dtype(at::kInt); auto float_opts = points.options().dtype(at::kFloat); at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); if (point_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizePointsNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream, points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius.contiguous().data_ptr<float>(), N, H, W, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * COARSE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsCoarseCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float* radius, const int N, const int P, const int H, const int W, const int bin_size, const int chunk_size, const int max_points_per_bin, int* points_per_bin, int* bin_points) { extern __shared__ char sbuf[]; const int M = max_points_per_bin; // Integer divide round up const int num_bins_x = 1 + (W - 1) / bin_size; const int num_bins_y = 1 + (H - 1) / bin_size; // NDC range depends on the ratio of W/H // The shorter side from (H, W) is given an NDC range of 2.0 and // the other side is scaled by the ratio of H:W. const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f; const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f; // Size of half a pixel in NDC units is the NDC half range // divided by the corresponding image dimension const float half_pix_x = NDC_x_half_range / W; const float half_pix_y = NDC_y_half_range / H; // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size); // Have each block handle a chunk of points and build a 3D bitmask in // shared memory to mark which points hit which bins. In this first phase, // each thread processes one point at a time. After processing the chunk, // one thread is assigned per bin, and the thread counts and writes the // points for the bin out to global memory. const int chunks_per_batch = 1 + (P - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; const int chunk_idx = chunk % chunks_per_batch; const int point_start_idx = chunk_idx * chunk_size; binmask.block_clear(); // Using the batch index of the thread get the start and stop // indices for the points. 
const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx]; const int64_t cloud_point_stop_idx = cloud_point_start_idx + num_points_per_cloud[batch_idx]; // Have each thread handle a different point within the chunk for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) { const int p_idx = point_start_idx + p; // Check if point index corresponds to the cloud in the batch given by // batch_idx. if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) { continue; } const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; const float p_radius = radius[p_idx]; if (pz < 0) continue; // Don't render points behind the camera. const float px0 = px - p_radius; const float px1 = px + p_radius; const float py0 = py - p_radius; const float py1 = py + p_radius; // Brute-force search over all bins; TODO something smarter? // For example we could compute the exact bin where the point falls, // then check neighboring bins. This way we wouldn't have to check // all bins (however then we might have more warp divergence?) for (int by = 0; by < num_bins_y; ++by) { // Get y extent for the bin. PixToNonSquareNdc gives us the location of // the center of each pixel, so we need to add/subtract a half // pixel to get the true extent of the bin. const float by0 = PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y; const float by1 = PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y; const bool y_overlap = (py0 <= by1) && (by0 <= py1); if (!y_overlap) { continue; } for (int bx = 0; bx < num_bins_x; ++bx) { // Get x extent for the bin; again we need to adjust the // output of PixToNonSquareNdc by half a pixel. const float bx0 = PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x; const float bx1 = PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x; const bool x_overlap = (px0 <= bx1) && (bx0 <= px1); if (x_overlap) { binmask.set(by, bx, p); } } } } __syncthreads(); // Now we have processed every point in the current chunk. We need to // count the number of points in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x; byx += blockDim.x) { const int by = byx / num_bins_x; const int bx = byx % num_bins_x; const int count = binmask.count(by, bx); const int points_per_bin_idx = batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx; // This atomically increments the (global) number of points found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_points array for the // points in the current chunk that fall into this bin. const int start = atomicAdd(points_per_bin + points_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_points. int next_idx = batch_idx * num_bins_y * num_bins_x * M + by * num_bins_x * M + bx * M + start; for (int p = 0; p < chunk_size; ++p) { if (binmask.get(by, bx, p)) { // TODO: Throw an error if next_idx >= M -- this means that // we got more than max_points_per_bin in this bin // TODO: check if atomicAdd is needed in line 265. 
bin_points[next_idx] = point_start_idx + p; next_idx++; } } } __syncthreads(); } } at::Tensor RasterizePointsCoarseCuda( const at::Tensor& points, // (P, 3) const at::Tensor& cloud_to_packed_first_idx, // (N) const at::Tensor& num_points_per_cloud, // (N) const std::tuple<int, int> image_size, const at::Tensor& radius, const int bin_size, const int max_points_per_bin) { TORCH_CHECK( points.ndimension() == 2 && points.size(1) == 3, "points must have dimensions (num_points, 3)"); // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, cloud_to_packed_first_idx_t{ cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2}, num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3}; at::CheckedFrom c = "RasterizePointsCoarseCuda"; at::checkAllSameGPU( c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int P = points.size(0); const int N = num_points_per_cloud.size(0); const int M = max_points_per_bin; // Integer divide round up. const int num_bins_y = 1 + (H - 1) / bin_size; const int num_bins_x = 1 + (W - 1) / bin_size; if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) { // Make sure we do not use too much shared memory. std::stringstream ss; ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y << ", num_bins_x: " << num_bins_x << ", " << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = num_points_per_cloud.options().dtype(at::kInt); at::Tensor points_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts); at::Tensor bin_points = at::full({N, num_bins_y, num_bins_x, M}, -1, opts); if (bin_points.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return bin_points; } const int chunk_size = 512; const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; hipLaunchKernelGGL(( RasterizePointsCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, stream, points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius.contiguous().data_ptr<float>(), N, P, H, W, bin_size, chunk_size, M, points_per_bin.contiguous().data_ptr<int32_t>(), bin_points.contiguous().data_ptr<int32_t>()); AT_CUDA_CHECK(hipGetLastError()); return bin_points; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsFineCudaKernel( const float* points, // (P, 3) const int32_t* bin_points, // (N, BH, BW, T) const float* radius, const int bin_size, const int N, const int BH, // num_bins y const int BW, // num_bins x const int M, const int H, const int W, const int K, int32_t* point_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists) { // (N, H, W, K) // This can be more than H * W if H or W are not divisible by bin_size. const int num_pixels = N * BH * BW * bin_size * bin_size; const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. 
We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from points and bin_points. int i = pid; const int n = i / (BH * BW * bin_size * bin_size); i %= BH * BW * bin_size * bin_size; const int by = i / (BW * bin_size * bin_size); i %= BW * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); // This part looks like the naive rasterization kernel, except we use // bin_points to only look at a subset of points already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; ++m) { const int p = bin_points[n * BH * BW * M + by * BW * M + bx * M + m]; if (p < 0) { // bin_points uses -1 as a sentinal value continue; } CheckPixelInsidePoint( points, p, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K); } // Now we've looked at all the points for this bin, so we can write // output for the current pixel. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis as the camera coordinates // assume that +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; for (int k = 0; k < q_size; ++k) { point_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist2; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsFineCuda( const at::Tensor& points, // (P, 3) const at::Tensor& bin_points, const std::tuple<int, int> image_size, const at::Tensor& radius, const int bin_size, const int points_per_pixel) { // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, bin_points_t{bin_points, "bin_points", 2}; at::CheckedFrom c = "RasterizePointsFineCuda"; at::checkAllSameGPU(c, {points_t, bin_points_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int N = bin_points.size(0); const int BH = bin_points.size(1); const int BW = bin_points.size(2); const int M = bin_points.size(3); const int K = points_per_pixel; const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto int_opts = bin_points.options().dtype(at::kInt); auto float_opts = points.options().dtype(at::kFloat); at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); if (point_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizePointsFineCudaKernel), dim3(blocks), dim3(threads), 0, stream, points.contiguous().data_ptr<float>(), bin_points.contiguous().data_ptr<int32_t>(), radius.contiguous().data_ptr<float>(), bin_size, N, BH, BW, M, H, W, K, point_idxs.contiguous().data_ptr<int32_t>(), 
zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO(T55115174) Add more documentation for backward kernel. __global__ void RasterizePointsBackwardCudaKernel( const float* points, // (P, 3) const int32_t* idxs, // (N, H, W, K) const int N, const int P, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_dists, // (N, H, W, K) float* grad_points) { // (P, 3) // Parallelized over each of K points per pixel, for each pixel in images of // size H * W, for each image in the batch of size N. int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < N * H * W * K; i += num_threads) { // const int n = i / (H * W * K); // batch index (not needed). const int yxk = i % (H * W * K); const int yi = yxk / (W * K); const int xk = yxk % (W * K); const int xi = xk / K; // k = xk % K (We don't actually need k, but this would be it.) // Reverse ordering of X and Y axes. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const float xf = PixToNonSquareNdc(xidx, W, H); const float yf = PixToNonSquareNdc(yidx, H, W); const int p = idxs[i]; if (p < 0) continue; const float grad_dist2 = grad_dists[i]; const int p_ind = p * 3; // index into packed points tensor const float px = points[p_ind + 0]; const float py = points[p_ind + 1]; const float dx = px - xf; const float dy = py - yf; const float grad_px = 2.0f * grad_dist2 * dx; const float grad_py = 2.0f * grad_dist2 * dy; const float grad_pz = grad_zbuf[i]; atomicAdd(grad_points + p_ind + 0, grad_px); atomicAdd(grad_points + p_ind + 1, grad_py); atomicAdd(grad_points + p_ind + 2, grad_pz); } } at::Tensor RasterizePointsBackwardCuda( const at::Tensor& points, // (N, P, 3) const at::Tensor& idxs, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_dists) { // (N, H, W, K) // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, idxs_t{idxs, "idxs", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_dists_t{grad_dists, "grad_dists", 4}; at::CheckedFrom c = "RasterizePointsBackwardCuda"; at::checkAllSameGPU(c, {points_t, idxs_t, grad_zbuf_t, grad_dists_t}); at::checkAllSameType(c, {points_t, grad_zbuf_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int P = points.size(0); const int N = idxs.size(0); const int H = idxs.size(1); const int W = idxs.size(2); const int K = idxs.size(3); at::Tensor grad_points = at::zeros({P, 3}, points.options()); if (grad_points.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_points; } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizePointsBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream, points.contiguous().data_ptr<float>(), idxs.contiguous().data_ptr<int32_t>(), N, P, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_points.contiguous().data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return grad_points; }
f6ed4313f0f0c94292653c36b6ec1cfa74d2a2ec.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <math.h> #include <cstdio> #include <sstream> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" namespace { // A little structure for holding details about a pixel. struct Pix { float z; // Depth of the reference point. int32_t idx; // Index of the reference point. float dist2; // Euclidean distance square to the reference point. }; __device__ inline bool operator<(const Pix& a, const Pix& b) { return a.z < b.z; } // This function checks if a pixel given by xy location pxy lies within the // point with index p and batch index n. One of the inputs is a list (q) // which contains Pixel structs with the indices of the points which intersect // with this pixel sorted by closest z distance. If the pixel pxy lies in the // point, the list (q) is updated and re-orderered in place. In addition // the auxiliary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizePointsNaiveCudaKernel and // RasterizePointsFineCudaKernel. template <typename PointQ> __device__ void CheckPixelInsidePoint( const float* points, // (P, 3) const int p_idx, int& q_size, float& q_max_z, int& q_max_idx, PointQ& q, const float* radius, const float xf, const float yf, const int K) { const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; const float p_radius = radius[p_idx]; const float radius2 = p_radius * p_radius; if (pz < 0) return; // Don't render points behind the camera const float dx = xf - px; const float dy = yf - py; const float dist2 = dx * dx + dy * dy; if (dist2 < radius2) { if (q_size < K) { // Just insert it q[q_size] = {pz, p_idx, dist2}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max q[q_max_idx] = {pz, p_idx, dist2}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsNaiveCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float* radius, const int N, const int H, const int W, const int K, int32_t* point_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists) { // (N, H, W, K) // Simple version: One thread per output pixel const int num_threads = gridDim.x * blockDim.x; const int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // Batch index const int pix_idx = i % (H * W); // Reverse ordering of the X and Y axis as the camera coordinates // assume that +Y is pointing up and +X is pointing left. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordinates of pixel. 
const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. // TODO(jcjohns) Abstract this out into a standalone data structure Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the points. const int64_t point_start_idx = cloud_to_packed_first_idx[n]; const int64_t point_stop_idx = point_start_idx + num_points_per_cloud[n]; for (int p_idx = point_start_idx; p_idx < point_stop_idx; ++p_idx) { CheckPixelInsidePoint( points, p_idx, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K); } BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { point_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist2; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsNaiveCuda( const at::Tensor& points, // (P. 3) const at::Tensor& cloud_to_packed_first_idx, // (N) const at::Tensor& num_points_per_cloud, // (N) const std::tuple<int, int> image_size, const at::Tensor& radius, const int points_per_pixel) { // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, cloud_to_packed_first_idx_t{ cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2}, num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3}; at::CheckedFrom c = "RasterizePointsNaiveCuda"; at::checkAllSameGPU( c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(points.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK( points.ndimension() == 2 && points.size(1) == 3, "points must have dimensions (num_points, 3)"); TORCH_CHECK( num_points_per_cloud.size(0) == cloud_to_packed_first_idx.size(0), "num_points_per_cloud must have same size first dimension as cloud_to_packed_first_idx"); const int N = num_points_per_cloud.size(0); // batch size. 
const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int K = points_per_pixel; if (K > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } auto int_opts = num_points_per_cloud.options().dtype(at::kInt); auto float_opts = points.options().dtype(at::kFloat); at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); if (point_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizePointsNaiveCudaKernel<<<blocks, threads, 0, stream>>>( points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius.contiguous().data_ptr<float>(), N, H, W, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * COARSE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsCoarseCudaKernel( const float* points, // (P, 3) const int64_t* cloud_to_packed_first_idx, // (N) const int64_t* num_points_per_cloud, // (N) const float* radius, const int N, const int P, const int H, const int W, const int bin_size, const int chunk_size, const int max_points_per_bin, int* points_per_bin, int* bin_points) { extern __shared__ char sbuf[]; const int M = max_points_per_bin; // Integer divide round up const int num_bins_x = 1 + (W - 1) / bin_size; const int num_bins_y = 1 + (H - 1) / bin_size; // NDC range depends on the ratio of W/H // The shorter side from (H, W) is given an NDC range of 2.0 and // the other side is scaled by the ratio of H:W. const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f; const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f; // Size of half a pixel in NDC units is the NDC half range // divided by the corresponding image dimension const float half_pix_x = NDC_x_half_range / W; const float half_pix_y = NDC_y_half_range / H; // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size); // Have each block handle a chunk of points and build a 3D bitmask in // shared memory to mark which points hit which bins. In this first phase, // each thread processes one point at a time. After processing the chunk, // one thread is assigned per bin, and the thread counts and writes the // points for the bin out to global memory. const int chunks_per_batch = 1 + (P - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; const int chunk_idx = chunk % chunks_per_batch; const int point_start_idx = chunk_idx * chunk_size; binmask.block_clear(); // Using the batch index of the thread get the start and stop // indices for the points. 
const int64_t cloud_point_start_idx = cloud_to_packed_first_idx[batch_idx]; const int64_t cloud_point_stop_idx = cloud_point_start_idx + num_points_per_cloud[batch_idx]; // Have each thread handle a different point within the chunk for (int p = threadIdx.x; p < chunk_size; p += blockDim.x) { const int p_idx = point_start_idx + p; // Check if point index corresponds to the cloud in the batch given by // batch_idx. if (p_idx >= cloud_point_stop_idx || p_idx < cloud_point_start_idx) { continue; } const float px = points[p_idx * 3 + 0]; const float py = points[p_idx * 3 + 1]; const float pz = points[p_idx * 3 + 2]; const float p_radius = radius[p_idx]; if (pz < 0) continue; // Don't render points behind the camera. const float px0 = px - p_radius; const float px1 = px + p_radius; const float py0 = py - p_radius; const float py1 = py + p_radius; // Brute-force search over all bins; TODO something smarter? // For example we could compute the exact bin where the point falls, // then check neighboring bins. This way we wouldn't have to check // all bins (however then we might have more warp divergence?) for (int by = 0; by < num_bins_y; ++by) { // Get y extent for the bin. PixToNonSquareNdc gives us the location of // the center of each pixel, so we need to add/subtract a half // pixel to get the true extent of the bin. const float by0 = PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y; const float by1 = PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y; const bool y_overlap = (py0 <= by1) && (by0 <= py1); if (!y_overlap) { continue; } for (int bx = 0; bx < num_bins_x; ++bx) { // Get x extent for the bin; again we need to adjust the // output of PixToNonSquareNdc by half a pixel. const float bx0 = PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x; const float bx1 = PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x; const bool x_overlap = (px0 <= bx1) && (bx0 <= px1); if (x_overlap) { binmask.set(by, bx, p); } } } } __syncthreads(); // Now we have processed every point in the current chunk. We need to // count the number of points in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x; byx += blockDim.x) { const int by = byx / num_bins_x; const int bx = byx % num_bins_x; const int count = binmask.count(by, bx); const int points_per_bin_idx = batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx; // This atomically increments the (global) number of points found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_points array for the // points in the current chunk that fall into this bin. const int start = atomicAdd(points_per_bin + points_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_points. int next_idx = batch_idx * num_bins_y * num_bins_x * M + by * num_bins_x * M + bx * M + start; for (int p = 0; p < chunk_size; ++p) { if (binmask.get(by, bx, p)) { // TODO: Throw an error if next_idx >= M -- this means that // we got more than max_points_per_bin in this bin // TODO: check if atomicAdd is needed in line 265. 
bin_points[next_idx] = point_start_idx + p; next_idx++; } } } __syncthreads(); } } at::Tensor RasterizePointsCoarseCuda( const at::Tensor& points, // (P, 3) const at::Tensor& cloud_to_packed_first_idx, // (N) const at::Tensor& num_points_per_cloud, // (N) const std::tuple<int, int> image_size, const at::Tensor& radius, const int bin_size, const int max_points_per_bin) { TORCH_CHECK( points.ndimension() == 2 && points.size(1) == 3, "points must have dimensions (num_points, 3)"); // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, cloud_to_packed_first_idx_t{ cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2}, num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3}; at::CheckedFrom c = "RasterizePointsCoarseCuda"; at::checkAllSameGPU( c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(points.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); const int P = points.size(0); const int N = num_points_per_cloud.size(0); const int M = max_points_per_bin; // Integer divide round up. const int num_bins_y = 1 + (H - 1) / bin_size; const int num_bins_x = 1 + (W - 1) / bin_size; if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) { // Make sure we do not use too much shared memory. std::stringstream ss; ss << "In Coarse Rasterizer got num_bins_y: " << num_bins_y << ", num_bins_x: " << num_bins_x << ", " << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = num_points_per_cloud.options().dtype(at::kInt); at::Tensor points_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts); at::Tensor bin_points = at::full({N, num_bins_y, num_bins_x, M}, -1, opts); if (bin_points.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return bin_points; } const int chunk_size = 512; const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; RasterizePointsCoarseCudaKernel<<<blocks, threads, shared_size, stream>>>( points.contiguous().data_ptr<float>(), cloud_to_packed_first_idx.contiguous().data_ptr<int64_t>(), num_points_per_cloud.contiguous().data_ptr<int64_t>(), radius.contiguous().data_ptr<float>(), N, P, H, W, bin_size, chunk_size, M, points_per_bin.contiguous().data_ptr<int32_t>(), bin_points.contiguous().data_ptr<int32_t>()); AT_CUDA_CHECK(cudaGetLastError()); return bin_points; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizePointsFineCudaKernel( const float* points, // (P, 3) const int32_t* bin_points, // (N, BH, BW, T) const float* radius, const int bin_size, const int N, const int BH, // num_bins y const int BW, // num_bins x const int M, const int H, const int W, const int K, int32_t* point_idxs, // (N, H, W, K) float* zbuf, // (N, H, W, K) float* pix_dists) { // (N, H, W, K) // This can be more than H * W if H or W are not divisible by bin_size. const int num_pixels = N * BH * BW * bin_size * bin_size; const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. 
We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from points and bin_points. int i = pid; const int n = i / (BH * BW * bin_size * bin_size); i %= BH * BW * bin_size * bin_size; const int by = i / (BW * bin_size * bin_size); i %= BW * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNonSquareNdc(xi, W, H); const float yf = PixToNonSquareNdc(yi, H, W); // This part looks like the naive rasterization kernel, except we use // bin_points to only look at a subset of points already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pix q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; ++m) { const int p = bin_points[n * BH * BW * M + by * BW * M + bx * M + m]; if (p < 0) { // bin_points uses -1 as a sentinal value continue; } CheckPixelInsidePoint( points, p, q_size, q_max_z, q_max_idx, q, radius, xf, yf, K); } // Now we've looked at all the points for this bin, so we can write // output for the current pixel. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis as the camera coordinates // assume that +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; for (int k = 0; k < q_size; ++k) { point_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist2; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor> RasterizePointsFineCuda( const at::Tensor& points, // (P, 3) const at::Tensor& bin_points, const std::tuple<int, int> image_size, const at::Tensor& radius, const int bin_size, const int points_per_pixel) { // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, bin_points_t{bin_points, "bin_points", 2}; at::CheckedFrom c = "RasterizePointsFineCuda"; at::checkAllSameGPU(c, {points_t, bin_points_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(points.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int N = bin_points.size(0); const int BH = bin_points.size(1); const int BW = bin_points.size(2); const int M = bin_points.size(3); const int K = points_per_pixel; const int H = std::get<0>(image_size); const int W = std::get<1>(image_size); if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto int_opts = bin_points.options().dtype(at::kInt); auto float_opts = points.options().dtype(at::kFloat); at::Tensor point_idxs = at::full({N, H, W, K}, -1, int_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); if (point_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizePointsFineCudaKernel<<<blocks, threads, 0, stream>>>( points.contiguous().data_ptr<float>(), bin_points.contiguous().data_ptr<int32_t>(), radius.contiguous().data_ptr<float>(), bin_size, N, BH, BW, M, H, W, K, point_idxs.contiguous().data_ptr<int32_t>(), zbuf.contiguous().data_ptr<float>(), pix_dists.contiguous().data_ptr<float>()); 
AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(point_idxs, zbuf, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO(T55115174) Add more documentation for backward kernel. __global__ void RasterizePointsBackwardCudaKernel( const float* points, // (P, 3) const int32_t* idxs, // (N, H, W, K) const int N, const int P, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_dists, // (N, H, W, K) float* grad_points) { // (P, 3) // Parallelized over each of K points per pixel, for each pixel in images of // size H * W, for each image in the batch of size N. int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = tid; i < N * H * W * K; i += num_threads) { // const int n = i / (H * W * K); // batch index (not needed). const int yxk = i % (H * W * K); const int yi = yxk / (W * K); const int xk = yxk % (W * K); const int xi = xk / K; // k = xk % K (We don't actually need k, but this would be it.) // Reverse ordering of X and Y axes. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const float xf = PixToNonSquareNdc(xidx, W, H); const float yf = PixToNonSquareNdc(yidx, H, W); const int p = idxs[i]; if (p < 0) continue; const float grad_dist2 = grad_dists[i]; const int p_ind = p * 3; // index into packed points tensor const float px = points[p_ind + 0]; const float py = points[p_ind + 1]; const float dx = px - xf; const float dy = py - yf; const float grad_px = 2.0f * grad_dist2 * dx; const float grad_py = 2.0f * grad_dist2 * dy; const float grad_pz = grad_zbuf[i]; atomicAdd(grad_points + p_ind + 0, grad_px); atomicAdd(grad_points + p_ind + 1, grad_py); atomicAdd(grad_points + p_ind + 2, grad_pz); } } at::Tensor RasterizePointsBackwardCuda( const at::Tensor& points, // (N, P, 3) const at::Tensor& idxs, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_dists) { // (N, H, W, K) // Check inputs are on the same device at::TensorArg points_t{points, "points", 1}, idxs_t{idxs, "idxs", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_dists_t{grad_dists, "grad_dists", 4}; at::CheckedFrom c = "RasterizePointsBackwardCuda"; at::checkAllSameGPU(c, {points_t, idxs_t, grad_zbuf_t, grad_dists_t}); at::checkAllSameType(c, {points_t, grad_zbuf_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(points.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int P = points.size(0); const int N = idxs.size(0); const int H = idxs.size(1); const int W = idxs.size(2); const int K = idxs.size(3); at::Tensor grad_points = at::zeros({P, 3}, points.options()); if (grad_points.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_points; } const size_t blocks = 1024; const size_t threads = 64; RasterizePointsBackwardCudaKernel<<<blocks, threads, 0, stream>>>( points.contiguous().data_ptr<float>(), idxs.contiguous().data_ptr<int32_t>(), N, P, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_points.contiguous().data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return grad_points; }
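The backward pass at the end of the file above reduces, for every (pixel, k) sample, the gradient of the squared screen-space distance onto the packed points tensor with atomicAdd, using d(dist2)/d(px) = 2*(px - xf) and d(dist2)/d(py) = 2*(py - yf). The kernel below is a hedged, standalone illustration of that rule on a simplified (P, 2) layout; it is not part of the dataset, and all names and shapes in it are made up for the example.

// Hypothetical standalone sketch (not from the dataset): the same gradient
// rule as RasterizePointsBackwardCudaKernel, isolated on a toy (P, 2) layout.
// dist2 = (px - xf)^2 + (py - yf)^2  =>  d(dist2)/d(px) = 2 * (px - xf).
#include <cuda_runtime.h>

__global__ void AccumulateDistGrad(
    const float* points,      // (P, 2) packed xy positions (toy layout)
    const int* idxs,          // (M,)   point index per pixel sample, -1 = empty
    const float* pix_xy,      // (M, 2) pixel centers in NDC
    const float* grad_dists,  // (M,)   upstream gradient w.r.t. dist2
    int M,
    float* grad_points) {     // (P, 2) zero-initialized output
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= M) return;
  const int p = idxs[i];
  if (p < 0) return;  // -1 sentinel: nothing rasterized at this sample
  const float dx = points[2 * p + 0] - pix_xy[2 * i + 0];
  const float dy = points[2 * p + 1] - pix_xy[2 * i + 1];
  // Several samples may reference the same point, so accumulate atomically,
  // exactly as the backward kernel above does on its (P, 3) tensor.
  atomicAdd(grad_points + 2 * p + 0, 2.0f * grad_dists[i] * dx);
  atomicAdd(grad_points + 2 * p + 1, 2.0f * grad_dists[i] * dy);
}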
ebf80ccca0541f86901fa6866550070e32d36ab4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand.h> #include <stdio.h> #include <assert.h> #include <iostream> #include <fstream> #include <sys/time.h> using namespace std; // image size int rows = 1224, cols = 1624; int imgSize = rows*cols; // iterations for stereo matching algorithm int iteration = 1; // disparity range int Dmin = 1; int Dmax = 80; int Drange = Dmax - Dmin + 1; //int winRadius = 9; // device image pointer float* dLImgPtr_f = NULL; float* dRImgPtr_f = NULL; size_t lPitch, rPitch; // texture memory for stereo image pair <Type, Dim, ReadMode> texture<float, 2, hipReadModeElementType> lTex; texture<float, 2, hipReadModeElementType> rTex; // timing arrays const int nt = 2; double start[nt], end[nt]; double random_start[nt], random_end[nt]; double main_start[nt], main_end[nt]; // evaluate window-based disimilarity __device__ float evaluateCost(float u, float v, float matchIdx, int cols, int rows, int winRadius) { float cost = 0.0f; for(int h=-winRadius; h<=winRadius; h++) { for(int w=-winRadius; w<=winRadius; w++) { cost += fabsf(tex2D(lTex, matchIdx+ w/(float)cols, v+h/(float)rows) - tex2D(rTex, u+w/(float)cols, v+h/(float)rows)); } } return cost; } // disparity pointer in device global memory __global__ void stereoMatching(float* dRDispPtr, float* dRPlanes, int cols, int rows, hiprandState_t* states, int iteration) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int winRadius = 9; // does not need to process borders if(x>=cols-winRadius || x<winRadius || y>=rows-winRadius || y<winRadius) return; float u = x/(float)cols; float v = y/(float)rows; int idx = y*cols +x; // if 1st iteration, enforce planes to be fronto-parallel if(iteration != 0) { // x of a unit normal vector dRPlanes[idx*3] = 0.0f; // y dRPlanes[idx*3+1] = 0.0f; // z dRPlanes[idx*3+2] = 1.0f; } // evaluate disparity of current pixel float min_cost = 0.0f; float cost = 0.0f; float tmp_disp = dRDispPtr[idx]; float matchIdx = u + tmp_disp*80.0f/(float)cols; min_cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); // evaluate disparity of left neighbor cost = 0.0f; tmp_disp = dRDispPtr[idx-1]; matchIdx = u + tmp_disp*80.0f/(float)cols; cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); // update current disparity if lower cost from neighbor's if(cost < min_cost) { min_cost = cost; dRDispPtr[idx] = tmp_disp; } // evaluate disparity of upper neighbor cost = 0.0f; tmp_disp = dRDispPtr[idx-cols]; matchIdx = u + tmp_disp*80.0f/(float)cols; cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); if(cost < min_cost) { min_cost = cost; dRDispPtr[idx] = tmp_disp; } // evaluate another valid random disparitiy (within border) in case it is trapped at a local minima matchIdx= -1.0f; while(matchIdx <(float)winRadius/cols || matchIdx >=(float)(cols-winRadius)/cols ) { tmp_disp = hiprand_uniform(&states[idx]); matchIdx = u + tmp_disp*80.0f/(float)cols; } cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); if(cost<min_cost) { min_cost = cost; dRDispPtr[idx] = tmp_disp; } return; } // initialize random states __global__ void init(unsigned int seed, hiprandState_t* states, int cols) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*cols+x; hiprand_init(seed, idx, 0, &states[idx]); } // read .pgm image int readPGM(const char* imgName, char* imgPtr) { FILE *filePtr; const int MAXLENGTH = 50; // input line for 
header lines // 1st line: P5 // 2nd line(image size): 1624 1224 // 3rdline(max pixel value): 255 // the rest are binary image data char line[MAXLENGTH]; // open file if( (filePtr = fopen(imgName, "rb")) == NULL ) { cout<<"Can not open"<<endl; fclose(filePtr); return -1; } // read first line fgets(line, MAXLENGTH, filePtr); if(line[0] != 'P' || line[1] != '5') { cout<<"Not P5 pgm format"; fclose(filePtr); return -1; } // image size fgets(line, MAXLENGTH, filePtr); // max pixel value fgets(line, MAXLENGTH, filePtr); for (int i = 0; i < rows; i++) { fread(&imgPtr[i*cols], sizeof(char), cols, filePtr); if (feof(filePtr)) break; } fclose(filePtr); return 0; } int writePGM(const char* imgName, char* imgPtr) { ofstream f(imgName, std::ios_base::out | std::ios_base::binary | std::ios_base::trunc); // image size const char widthStr[] = "1624"; const char heightStr[] = "1224"; f << "P5\n" << widthStr << " " << heightStr << "\n255"; for(int i=0; i<rows; i++) f.write(reinterpret_cast<const char*>(&imgPtr[i*cols]), cols); return 0; } // convert char image to float image and normalize to [0,1] // if reverse is true, convert float to char int imgCharToFloat(char* imgCharPtr, float* imgFloatPtr, bool reverse) { if(!reverse) { // #pragma omp parallel for for(int i=0; i<imgSize; i++) imgFloatPtr[i] = (float)imgCharPtr[i];///255.0f; } else { // #pragma omp parallel for for(int i=0; i<imgSize; i++) imgCharPtr[i] = (char)(imgFloatPtr[i]*80.0f); } return 0; } // for timing struct timeval timerStart; void StartTimer() { gettimeofday(&timerStart, NULL); } // time elapsed in ms double GetTimer() { struct timeval timerStop, timerElapsed; gettimeofday(&timerStop, NULL); timersub(&timerStop, &timerStart, &timerElapsed); return timerElapsed.tv_sec*1000.0+timerElapsed.tv_usec/1000.0; } void timingStat(double* start, double* end, int nt, double* average, double* sd) { *average = 0.0; for(int i=0; i<nt; i++) *average += end[i] - start[i]; *average /= (double)nt; *sd = 0.0; for(int i=0; i<nt; i++) *sd += pow(end[i] - start[i] - *average, 2); *sd = sqrt(*sd/(double)(nt-1)); return; } int main(int argc, char** argv) { const char leftImgName[] = "l.pgm"; const char rightImgName[] = "r.pgm"; // allocate left image (grayscale) char* lImgPtr_8u = new char[imgSize]; if(readPGM(leftImgName, lImgPtr_8u) < 0) { cout<<"read left image fail"<<endl; delete[] lImgPtr_8u; return -1; } // allocate right image char* rImgPtr_8u = new char[imgSize]; if(readPGM(rightImgName, rImgPtr_8u) < 0) { cout<<"read right image fail"<<endl; delete[] rImgPtr_8u; return -1; } // convert image type from char to float float* lImgPtr_f = new float[imgSize]; imgCharToFloat(lImgPtr_8u, lImgPtr_f, false); float* rImgPtr_f = new float[imgSize]; imgCharToFloat(rImgPtr_8u, rImgPtr_f, false); // allocate pitch memory on device for left and right image if(hipSuccess != hipMallocPitch(&dLImgPtr_f, &lPitch, cols*sizeof(float), rows)) cout<<"MallocPitch left error"<<endl; if(hipSuccess != hipMallocPitch(&dRImgPtr_f, &rPitch, cols*sizeof(float), rows)) cout<<"MallocPitch right error"<<endl; // allocate global memory on device for right disparity map float* dRDisp; if(hipSuccess != hipMalloc(&dRDisp, cols*sizeof(float)*rows)) cout<<"Malloc disp error"<<endl; // allocate global memory on device for right planes float* dRPlanes; if(hipSuccess != hipMalloc(&dRPlanes, cols*3*sizeof(float)*rows)) cout<<"Malloc planes error"<<endl; // copy images from host to device if(hipSuccess != hipMemcpy2D(dLImgPtr_f, lPitch, lImgPtr_f, sizeof(float)*cols, sizeof(float)*cols, rows, 
hipMemcpyHostToDevice)) cout<<"Memcpy2D left error"<<endl; if(hipSuccess != hipMemcpy2D(dRImgPtr_f, rPitch, rImgPtr_f, sizeof(float)*cols, sizeof(float)*cols, rows, hipMemcpyHostToDevice)) cout<<"Memcpy2D right error"<<endl; // setup texture lTex.addressMode[0] = hipAddressModeClamp; lTex.addressMode[1] = hipAddressModeClamp; lTex.filterMode = hipFilterModeLinear; lTex.normalized = true; rTex.addressMode[0] = hipAddressModeClamp; rTex.addressMode[1] = hipAddressModeClamp; rTex.filterMode = hipFilterModeLinear; rTex.normalized = true; // Bind linear memory to the texture memory hipChannelFormatDesc desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); if(hipSuccess != hipBindTexture2D(0, lTex, dLImgPtr_f, desc, cols, rows, lPitch)) cout<<"Bind left tex error"<<endl; if(hipSuccess != hipBindTexture2D(0, rTex, dRImgPtr_f, desc, cols, rows, rPitch)) cout<<"Bind right tex error"<<endl; // launch kernel dim3 blockSize(16, 16); dim3 gridSize( (cols + blockSize.x - 1)/blockSize.x, (rows + blockSize.x - 1)/blockSize.x); StartTimer(); // allocate memory for states hiprandState_t* states; hipMalloc(&states, imgSize*sizeof(hiprandState_t)); // initialize random states hipLaunchKernelGGL(( init), dim3(gridSize), dim3(blockSize), 0, 0, 1234, states, cols); hipDeviceSynchronize(); cout<<"Init states time: "<<GetTimer()<<"ms"<<endl; hiprandGenerator_t gen; for(int t=0; t<=nt; t++) { hipDeviceSynchronize(); if(t>0) { StartTimer(); random_start[t-1] = 0.0; } // host CURAND hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); // set seed hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL); hiprandGenerateUniform(gen, dRDisp, imgSize); hipDeviceSynchronize(); if(t>0) random_end[t-1] = GetTimer(); hipDeviceSynchronize(); if(t>0) { StartTimer(); main_start[t-1] = 0.0; } for(int i=0; i<iteration; i++) { hipLaunchKernelGGL(( stereoMatching), dim3(gridSize), dim3(blockSize), 0, 0, dRDisp, dRPlanes, cols, rows, states, i); hipDeviceSynchronize(); } if(t>0) main_end[t-1] = GetTimer(); } // copy disparity map from global memory on device to host hipMemcpy(lImgPtr_f, dRDisp, sizeof(float)*cols*rows, hipMemcpyDeviceToHost); //float to char imgCharToFloat(lImgPtr_8u, lImgPtr_f, true); double average = 0.0, sd = 0.0; timingStat(random_start, random_end, nt, &average, &sd); cout<<"initial random disp: "<<average<<"ms sd"<<sd<<endl; timingStat(main_start, main_end, nt, &average, &sd); cout<<"main: "<<average<<"ms sd"<<sd<<endl; // Free device memory hipFree(dLImgPtr_f); hipFree(dRImgPtr_f); hipFree(dRDisp); hipFree(dRPlanes); hipFree(states); hiprandDestroyGenerator(gen); hipDeviceReset(); // write image writePGM("disp_cuda_10iter.pgm", lImgPtr_8u); delete[] lImgPtr_8u; delete[] rImgPtr_8u; delete[] lImgPtr_f; delete[] rImgPtr_f; return 0; }
ebf80ccca0541f86901fa6866550070e32d36ab4.cu
#include <cuda.h> #include <curand_kernel.h> #include <curand.h> #include <stdio.h> #include <assert.h> #include <iostream> #include <fstream> #include <sys/time.h> using namespace std; // image size int rows = 1224, cols = 1624; int imgSize = rows*cols; // iterations for stereo matching algorithm int iteration = 1; // disparity range int Dmin = 1; int Dmax = 80; int Drange = Dmax - Dmin + 1; //int winRadius = 9; // device image pointer float* dLImgPtr_f = NULL; float* dRImgPtr_f = NULL; size_t lPitch, rPitch; // texture memory for stereo image pair <Type, Dim, ReadMode> texture<float, 2, cudaReadModeElementType> lTex; texture<float, 2, cudaReadModeElementType> rTex; // timing arrays const int nt = 2; double start[nt], end[nt]; double random_start[nt], random_end[nt]; double main_start[nt], main_end[nt]; // evaluate window-based disimilarity __device__ float evaluateCost(float u, float v, float matchIdx, int cols, int rows, int winRadius) { float cost = 0.0f; for(int h=-winRadius; h<=winRadius; h++) { for(int w=-winRadius; w<=winRadius; w++) { cost += fabsf(tex2D(lTex, matchIdx+ w/(float)cols, v+h/(float)rows) - tex2D(rTex, u+w/(float)cols, v+h/(float)rows)); } } return cost; } // disparity pointer in device global memory __global__ void stereoMatching(float* dRDispPtr, float* dRPlanes, int cols, int rows, curandState* states, int iteration) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int winRadius = 9; // does not need to process borders if(x>=cols-winRadius || x<winRadius || y>=rows-winRadius || y<winRadius) return; float u = x/(float)cols; float v = y/(float)rows; int idx = y*cols +x; // if 1st iteration, enforce planes to be fronto-parallel if(iteration != 0) { // x of a unit normal vector dRPlanes[idx*3] = 0.0f; // y dRPlanes[idx*3+1] = 0.0f; // z dRPlanes[idx*3+2] = 1.0f; } // evaluate disparity of current pixel float min_cost = 0.0f; float cost = 0.0f; float tmp_disp = dRDispPtr[idx]; float matchIdx = u + tmp_disp*80.0f/(float)cols; min_cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); // evaluate disparity of left neighbor cost = 0.0f; tmp_disp = dRDispPtr[idx-1]; matchIdx = u + tmp_disp*80.0f/(float)cols; cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); // update current disparity if lower cost from neighbor's if(cost < min_cost) { min_cost = cost; dRDispPtr[idx] = tmp_disp; } // evaluate disparity of upper neighbor cost = 0.0f; tmp_disp = dRDispPtr[idx-cols]; matchIdx = u + tmp_disp*80.0f/(float)cols; cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); if(cost < min_cost) { min_cost = cost; dRDispPtr[idx] = tmp_disp; } // evaluate another valid random disparitiy (within border) in case it is trapped at a local minima matchIdx= -1.0f; while(matchIdx <(float)winRadius/cols || matchIdx >=(float)(cols-winRadius)/cols ) { tmp_disp = curand_uniform(&states[idx]); matchIdx = u + tmp_disp*80.0f/(float)cols; } cost = evaluateCost(u, v, matchIdx, cols, rows, winRadius); if(cost<min_cost) { min_cost = cost; dRDispPtr[idx] = tmp_disp; } return; } // initialize random states __global__ void init(unsigned int seed, curandState_t* states, int cols) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*cols+x; curand_init(seed, idx, 0, &states[idx]); } // read .pgm image int readPGM(const char* imgName, char* imgPtr) { FILE *filePtr; const int MAXLENGTH = 50; // input line for header lines // 1st line: P5 // 2nd line(image size): 1624 1224 // 3rdline(max pixel value): 
255 // the rest are binary image data char line[MAXLENGTH]; // open file if( (filePtr = fopen(imgName, "rb")) == NULL ) { cout<<"Can not open"<<endl; fclose(filePtr); return -1; } // read first line fgets(line, MAXLENGTH, filePtr); if(line[0] != 'P' || line[1] != '5') { cout<<"Not P5 pgm format"; fclose(filePtr); return -1; } // image size fgets(line, MAXLENGTH, filePtr); // max pixel value fgets(line, MAXLENGTH, filePtr); for (int i = 0; i < rows; i++) { fread(&imgPtr[i*cols], sizeof(char), cols, filePtr); if (feof(filePtr)) break; } fclose(filePtr); return 0; } int writePGM(const char* imgName, char* imgPtr) { ofstream f(imgName, std::ios_base::out | std::ios_base::binary | std::ios_base::trunc); // image size const char widthStr[] = "1624"; const char heightStr[] = "1224"; f << "P5\n" << widthStr << " " << heightStr << "\n255"; for(int i=0; i<rows; i++) f.write(reinterpret_cast<const char*>(&imgPtr[i*cols]), cols); return 0; } // convert char image to float image and normalize to [0,1] // if reverse is true, convert float to char int imgCharToFloat(char* imgCharPtr, float* imgFloatPtr, bool reverse) { if(!reverse) { // #pragma omp parallel for for(int i=0; i<imgSize; i++) imgFloatPtr[i] = (float)imgCharPtr[i];///255.0f; } else { // #pragma omp parallel for for(int i=0; i<imgSize; i++) imgCharPtr[i] = (char)(imgFloatPtr[i]*80.0f); } return 0; } // for timing struct timeval timerStart; void StartTimer() { gettimeofday(&timerStart, NULL); } // time elapsed in ms double GetTimer() { struct timeval timerStop, timerElapsed; gettimeofday(&timerStop, NULL); timersub(&timerStop, &timerStart, &timerElapsed); return timerElapsed.tv_sec*1000.0+timerElapsed.tv_usec/1000.0; } void timingStat(double* start, double* end, int nt, double* average, double* sd) { *average = 0.0; for(int i=0; i<nt; i++) *average += end[i] - start[i]; *average /= (double)nt; *sd = 0.0; for(int i=0; i<nt; i++) *sd += pow(end[i] - start[i] - *average, 2); *sd = sqrt(*sd/(double)(nt-1)); return; } int main(int argc, char** argv) { const char leftImgName[] = "l.pgm"; const char rightImgName[] = "r.pgm"; // allocate left image (grayscale) char* lImgPtr_8u = new char[imgSize]; if(readPGM(leftImgName, lImgPtr_8u) < 0) { cout<<"read left image fail"<<endl; delete[] lImgPtr_8u; return -1; } // allocate right image char* rImgPtr_8u = new char[imgSize]; if(readPGM(rightImgName, rImgPtr_8u) < 0) { cout<<"read right image fail"<<endl; delete[] rImgPtr_8u; return -1; } // convert image type from char to float float* lImgPtr_f = new float[imgSize]; imgCharToFloat(lImgPtr_8u, lImgPtr_f, false); float* rImgPtr_f = new float[imgSize]; imgCharToFloat(rImgPtr_8u, rImgPtr_f, false); // allocate pitch memory on device for left and right image if(cudaSuccess != cudaMallocPitch(&dLImgPtr_f, &lPitch, cols*sizeof(float), rows)) cout<<"MallocPitch left error"<<endl; if(cudaSuccess != cudaMallocPitch(&dRImgPtr_f, &rPitch, cols*sizeof(float), rows)) cout<<"MallocPitch right error"<<endl; // allocate global memory on device for right disparity map float* dRDisp; if(cudaSuccess != cudaMalloc(&dRDisp, cols*sizeof(float)*rows)) cout<<"Malloc disp error"<<endl; // allocate global memory on device for right planes float* dRPlanes; if(cudaSuccess != cudaMalloc(&dRPlanes, cols*3*sizeof(float)*rows)) cout<<"Malloc planes error"<<endl; // copy images from host to device if(cudaSuccess != cudaMemcpy2D(dLImgPtr_f, lPitch, lImgPtr_f, sizeof(float)*cols, sizeof(float)*cols, rows, cudaMemcpyHostToDevice)) cout<<"Memcpy2D left error"<<endl; if(cudaSuccess != 
cudaMemcpy2D(dRImgPtr_f, rPitch, rImgPtr_f, sizeof(float)*cols, sizeof(float)*cols, rows, cudaMemcpyHostToDevice)) cout<<"Memcpy2D right error"<<endl; // setup texture lTex.addressMode[0] = cudaAddressModeClamp; lTex.addressMode[1] = cudaAddressModeClamp; lTex.filterMode = cudaFilterModeLinear; lTex.normalized = true; rTex.addressMode[0] = cudaAddressModeClamp; rTex.addressMode[1] = cudaAddressModeClamp; rTex.filterMode = cudaFilterModeLinear; rTex.normalized = true; // Bind linear memory to the texture memory cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); if(cudaSuccess != cudaBindTexture2D(0, lTex, dLImgPtr_f, desc, cols, rows, lPitch)) cout<<"Bind left tex error"<<endl; if(cudaSuccess != cudaBindTexture2D(0, rTex, dRImgPtr_f, desc, cols, rows, rPitch)) cout<<"Bind right tex error"<<endl; // launch kernel dim3 blockSize(16, 16); dim3 gridSize( (cols + blockSize.x - 1)/blockSize.x, (rows + blockSize.x - 1)/blockSize.x); StartTimer(); // allocate memory for states curandState_t* states; cudaMalloc(&states, imgSize*sizeof(curandState_t)); // initialize random states init<<<gridSize, blockSize>>>(1234, states, cols); cudaDeviceSynchronize(); cout<<"Init states time: "<<GetTimer()<<"ms"<<endl; curandGenerator_t gen; for(int t=0; t<=nt; t++) { cudaDeviceSynchronize(); if(t>0) { StartTimer(); random_start[t-1] = 0.0; } // host CURAND curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); // set seed curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); curandGenerateUniform(gen, dRDisp, imgSize); cudaDeviceSynchronize(); if(t>0) random_end[t-1] = GetTimer(); cudaDeviceSynchronize(); if(t>0) { StartTimer(); main_start[t-1] = 0.0; } for(int i=0; i<iteration; i++) { stereoMatching<<<gridSize, blockSize>>>(dRDisp, dRPlanes, cols, rows, states, i); cudaDeviceSynchronize(); } if(t>0) main_end[t-1] = GetTimer(); } // copy disparity map from global memory on device to host cudaMemcpy(lImgPtr_f, dRDisp, sizeof(float)*cols*rows, cudaMemcpyDeviceToHost); //float to char imgCharToFloat(lImgPtr_8u, lImgPtr_f, true); double average = 0.0, sd = 0.0; timingStat(random_start, random_end, nt, &average, &sd); cout<<"initial random disp: "<<average<<"ms sd"<<sd<<endl; timingStat(main_start, main_end, nt, &average, &sd); cout<<"main: "<<average<<"ms sd"<<sd<<endl; // Free device memory cudaFree(dLImgPtr_f); cudaFree(dRImgPtr_f); cudaFree(dRDisp); cudaFree(dRPlanes); cudaFree(states); curandDestroyGenerator(gen); cudaDeviceReset(); // write image writePGM("disp_cuda_10iter.pgm", lImgPtr_8u); delete[] lImgPtr_8u; delete[] rImgPtr_8u; delete[] lImgPtr_f; delete[] rImgPtr_f; return 0; }
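The stereoMatching kernel in both copies of this file scores a disparity hypothesis with a window-based sum of absolute differences sampled through the two bound textures, then keeps the cheapest of the current value, the left neighbour, the upper neighbour, and one random draw per iteration (a PatchMatch-style propagation). The function below is a hedged CPU reference for the same cost on plain arrays, useful for spot-checking a few pixels against the GPU result; it is not taken from the dataset, it uses nearest-pixel sampling, and it takes the disparity directly in pixels, so it only approximates the normalized bilinear texture reads.

// Hypothetical CPU reference (not from the dataset) for the window SAD that
// evaluateCost computes through bilinear textures in the files above.
// Images are plain row-major float arrays; the border is clamped to mimic
// cudaAddressModeClamp, and the disparity is given directly in pixels.
#include <algorithm>
#include <cmath>

float sadCostCPU(const float* left, const float* right,
                 int cols, int rows, int winRadius,
                 int x, int y, float disparityPx) {
  float cost = 0.0f;
  for (int h = -winRadius; h <= winRadius; ++h) {
    for (int w = -winRadius; w <= winRadius; ++w) {
      const int yy = std::min(std::max(y + h, 0), rows - 1);
      const int xr = std::min(std::max(x + w, 0), cols - 1);
      const int xl = std::min(
          std::max((int)std::lround(x + disparityPx) + w, 0), cols - 1);
      // Left image is sampled at x + disparity, right image at x, as in the kernel.
      cost += std::fabs(left[yy * cols + xl] - right[yy * cols + xr]);
    }
  }
  return cost;
}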
025ca0589be6a386bc2bd7953ca000afa0fc204e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Find indices for each attention pattern Written by Jiageng Mao */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "build_attention_indices_gpu.h" #include "votr_cuda_utils.h" __device__ int simple_hash(int k, int hash_size) { return k % hash_size; } __device__ int hash_table_find(int &key, int &hash_size, const int *xyz_to_vidx) { int hash_idx = simple_hash(key, hash_size); int v_idx = EMPTY_KEY; int prob_cnt = 0; while (true) { // found if (xyz_to_vidx[hash_idx * 2 + 0] == key) { v_idx = xyz_to_vidx[hash_idx * 2 + 1]; break; } // empty, not found if (xyz_to_vidx[hash_idx * 2 + 0] == EMPTY_KEY) { break; } // linear probing hash_idx = (hash_idx + 1) % hash_size; // security in case of dead loop prob_cnt += 1; if (prob_cnt >= hash_size) break; } return v_idx; } __global__ void sparse_local_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* in sparse attention, voxels are not necessary at the non-empty location attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int sz_idx = z_idx * z_stride - attend_range; sz_idx <= z_idx * z_stride + (z_stride - 1) + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - attend_range; sy_idx <= y_idx * y_stride + (y_stride - 1) + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - attend_range; sx_idx <= x_idx * x_stride + (x_stride - 1) + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void sparse_local_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( sparse_local_attention_with_tensor_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, attend_range, attend_indices, v_indices, xyz_to_vidx); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void sparse_local_attention_with_hash_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int hash_size, int 
*attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* in sparse attention, voxels are not necessary at the non-empty location attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, hash_size, 2] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int sz_idx = z_idx * z_stride - attend_range; sz_idx <= z_idx * z_stride + (z_stride - 1) + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - attend_range; sy_idx <= y_idx * y_stride + (y_stride - 1) + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - attend_range; sx_idx <= x_idx * x_stride + (x_stride - 1) + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void sparse_local_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( sparse_local_attention_with_hash_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, attend_range, hash_size, attend_indices, v_indices, xyz_to_vidx); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void subm_local_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int sz_idx = z_idx - attend_range; sz_idx <= z_idx + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx - attend_range; sy_idx <= y_idx + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx - attend_range; sx_idx <= x_idx + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = 
xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void subm_local_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx){ hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( subm_local_attention_with_tensor_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, num_voxels, attend_size, attend_range, attend_indices, v_indices, xyz_to_vidx); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void subm_local_attention_with_hash_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int sz_idx = z_idx - attend_range; sz_idx <= z_idx + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx - attend_range; sy_idx <= y_idx + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx - attend_range; sx_idx <= x_idx + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void subm_local_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx){ hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( subm_local_attention_with_hash_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, num_voxels, attend_size, attend_range, hash_size, attend_indices, v_indices, xyz_to_vidx); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void sparse_strided_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int 
*attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points for (int sz_idx = z_idx * z_stride - z_offset; sz_idx <= z_idx * z_stride + (z_stride - 1) + z_offset; sz_idx += (2 * z_offset + z_stride - 1)){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - y_offset; sy_idx <= y_idx * y_stride + (y_stride - 1) + y_offset; sy_idx += (2 * y_offset + y_stride - 1)){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - x_offset; sx_idx <= x_idx * x_stride + (x_stride - 1) + x_offset; sx_idx += (2 * x_offset + x_stride - 1)){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void sparse_strided_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( sparse_strided_attention_with_tensor_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, num_range, attend_indices, v_indices, xyz_to_vidx, range_spec); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void sparse_strided_attention_with_hash_kernel(int x_max, int y_max, int 
z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, hash_size, 2] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points for (int sz_idx = z_idx * z_stride - z_offset; sz_idx <= z_idx * z_stride + (z_stride - 1) + z_offset; sz_idx += (2 * z_offset + z_stride - 1)){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - y_offset; sy_idx <= y_idx * y_stride + (y_stride - 1) + y_offset; sy_idx += (2 * y_offset + y_stride - 1)){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - x_offset; sx_idx <= x_idx * x_stride + (x_stride - 1) + x_offset; sx_idx += (2 * x_offset + x_stride - 1)){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void sparse_strided_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( sparse_strided_attention_with_hash_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, num_range, hash_size, attend_indices, v_indices, xyz_to_vidx, range_spec); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { 
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void subm_strided_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; int x_step = 0; int y_step = 0; int z_step = 0; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points if (z_offset == 0) { z_step = 1; } else { z_step = 2 * z_offset; } for (int sz_idx = z_idx - z_offset; sz_idx <= z_idx + z_offset; sz_idx += z_step){ if (sz_idx >= z_max || sz_idx < 0) continue; if (sz_idx >= z_max || sz_idx < 0) continue; if (y_offset == 0) { y_step = 1; } else { y_step = 2 * y_offset; } for (int sy_idx = y_idx - y_offset; sy_idx <= y_idx + y_offset; sy_idx += y_step){ if (sy_idx >= y_max || sy_idx < 0) continue; if (x_offset == 0) { x_step = 1; } else { x_step = 2 * x_offset; } for (int sx_idx = x_idx - x_offset; sx_idx <= x_idx + x_offset; sx_idx += x_step){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void subm_strided_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( subm_strided_attention_with_tensor_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, num_voxels, attend_size, num_range, attend_indices, v_indices, xyz_to_vidx, range_spec); // hipDeviceSynchronize(); 
// for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void subm_strided_attention_with_hash_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, hash_size, 2] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; int x_step = 0; int y_step = 0; int z_step = 0; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points if (z_offset == 0) { z_step = 1; } else { z_step = 2 * z_offset; } for (int sz_idx = z_idx - z_offset; sz_idx <= z_idx + z_offset; sz_idx += z_step){ if (sz_idx >= z_max || sz_idx < 0) continue; if (y_offset == 0) { y_step = 1; } else { y_step = 2 * y_offset; } for (int sy_idx = y_idx - y_offset; sy_idx <= y_idx + y_offset; sy_idx += y_step){ if (sy_idx >= y_max || sy_idx < 0) continue; if (x_offset == 0) { x_step = 1; } else { x_step = 2 * x_offset; } for (int sx_idx = x_idx - x_offset; sx_idx <= x_idx + x_offset; sx_idx += x_step){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void subm_strided_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ hipError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( subm_strided_attention_with_hash_kernel), dim3(blocks), dim3(threads), 0, 0, x_max, y_max, z_max, num_voxels, 
attend_size, num_range, hash_size, attend_indices, v_indices, xyz_to_vidx, range_spec); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
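The hash_table_find device function in the file above resolves a voxel key with open addressing: a simple modulo hash followed by linear probing until the key, an EMPTY_KEY slot, or a full cycle is reached. The matching build-side routine is not shown in this excerpt; the sketch below is one hedged way it could look, pairing an atomicCAS-based insert with the same probing loop so the [hash_size, 2] (key, value) layout is easier to picture. It deliberately ignores the ordering guarantees between the value write and concurrent lookups that a production table would need.

// Hypothetical companion to hash_table_find above (not part of the dataset):
// an open-addressing insert with the same linear probing and the same
// [hash_size][2] = {key, voxel index} layout. EMPTY_KEY marks free slots and
// is assumed to come from votr_cuda_utils.h, as in the kernels above.
__device__ void hash_table_insert(int key, int v_idx, int hash_size, int *xyz_to_vidx) {
  int hash_idx = key % hash_size;
  int prob_cnt = 0;
  while (prob_cnt < hash_size) {
    // Atomically claim the slot if it is still empty; a slot that already
    // holds this key is simply updated.
    int prev = atomicCAS(&xyz_to_vidx[hash_idx * 2 + 0], EMPTY_KEY, key);
    if (prev == EMPTY_KEY || prev == key) {
      xyz_to_vidx[hash_idx * 2 + 1] = v_idx;
      return;
    }
    hash_idx = (hash_idx + 1) % hash_size;  // slot taken by another key: probe on
    prob_cnt += 1;                          // give up after one full cycle
  }
}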
025ca0589be6a386bc2bd7953ca000afa0fc204e.cu
/* Find indices for each attention pattern Written by Jiageng Mao */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "build_attention_indices_gpu.h" #include "votr_cuda_utils.h" __device__ int simple_hash(int k, int hash_size) { return k % hash_size; } __device__ int hash_table_find(int &key, int &hash_size, const int *xyz_to_vidx) { int hash_idx = simple_hash(key, hash_size); int v_idx = EMPTY_KEY; int prob_cnt = 0; while (true) { // found if (xyz_to_vidx[hash_idx * 2 + 0] == key) { v_idx = xyz_to_vidx[hash_idx * 2 + 1]; break; } // empty, not found if (xyz_to_vidx[hash_idx * 2 + 0] == EMPTY_KEY) { break; } // linear probing hash_idx = (hash_idx + 1) % hash_size; // security in case of dead loop prob_cnt += 1; if (prob_cnt >= hash_size) break; } return v_idx; } __global__ void sparse_local_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* in sparse attention, voxels are not necessary at the non-empty location attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int sz_idx = z_idx * z_stride - attend_range; sz_idx <= z_idx * z_stride + (z_stride - 1) + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - attend_range; sy_idx <= y_idx * y_stride + (y_stride - 1) + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - attend_range; sx_idx <= x_idx * x_stride + (x_stride - 1) + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void sparse_local_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); sparse_local_attention_with_tensor_kernel<<<blocks, threads>>>(x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, attend_range, attend_indices, v_indices, xyz_to_vidx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void sparse_local_attention_with_hash_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* in sparse attention, voxels are not necessary at the non-empty 
location attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, hash_size, 2] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int sz_idx = z_idx * z_stride - attend_range; sz_idx <= z_idx * z_stride + (z_stride - 1) + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - attend_range; sy_idx <= y_idx * y_stride + (y_stride - 1) + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - attend_range; sx_idx <= x_idx * x_stride + (x_stride - 1) + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void sparse_local_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); sparse_local_attention_with_hash_kernel<<<blocks, threads>>>(x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, attend_range, hash_size, attend_indices, v_indices, xyz_to_vidx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void subm_local_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int sz_idx = z_idx - attend_range; sz_idx <= z_idx + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx - attend_range; sy_idx <= y_idx + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx - attend_range; sx_idx <= x_idx + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and 
return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void subm_local_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx){ cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); subm_local_attention_with_tensor_kernel<<<blocks, threads>>>(x_max, y_max, z_max, num_voxels, attend_size, attend_range, attend_indices, v_indices, xyz_to_vidx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void subm_local_attention_with_hash_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int sz_idx = z_idx - attend_range; sz_idx <= z_idx + attend_range; ++sz_idx){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx - attend_range; sy_idx <= y_idx + attend_range; ++sy_idx){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx - attend_range; sx_idx <= x_idx + attend_range; ++sx_idx){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } return; } void subm_local_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int attend_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx){ cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); subm_local_attention_with_hash_kernel<<<blocks, threads>>>(x_max, y_max, z_max, num_voxels, attend_size, attend_range, hash_size, attend_indices, v_indices, xyz_to_vidx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void sparse_strided_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, 
y_max, z_max] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points for (int sz_idx = z_idx * z_stride - z_offset; sz_idx <= z_idx * z_stride + (z_stride - 1) + z_offset; sz_idx += (2 * z_offset + z_stride - 1)){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - y_offset; sy_idx <= y_idx * y_stride + (y_stride - 1) + y_offset; sy_idx += (2 * y_offset + y_stride - 1)){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - x_offset; sx_idx <= x_idx * x_stride + (x_stride - 1) + x_offset; sx_idx += (2 * x_offset + x_stride - 1)){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void sparse_strided_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); sparse_strided_attention_with_tensor_kernel<<<blocks, threads>>>(x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, num_range, attend_indices, v_indices, xyz_to_vidx, range_spec); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void sparse_strided_attention_with_hash_kernel(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend 
indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, hash_size, 2] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points for (int sz_idx = z_idx * z_stride - z_offset; sz_idx <= z_idx * z_stride + (z_stride - 1) + z_offset; sz_idx += (2 * z_offset + z_stride - 1)){ if (sz_idx >= z_max || sz_idx < 0) continue; for (int sy_idx = y_idx * y_stride - y_offset; sy_idx <= y_idx * y_stride + (y_stride - 1) + y_offset; sy_idx += (2 * y_offset + y_stride - 1)){ if (sy_idx >= y_max || sy_idx < 0) continue; for (int sx_idx = x_idx * x_stride - x_offset; sx_idx <= x_idx * x_stride + (x_stride - 1) + x_offset; sx_idx += (2 * x_offset + x_stride - 1)){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void sparse_strided_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int x_stride, int y_stride, int z_stride, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); sparse_strided_attention_with_hash_kernel<<<blocks, threads>>>(x_max, y_max, z_max, x_stride, y_stride, z_stride, num_voxels, attend_size, num_range, hash_size, attend_indices, v_indices, xyz_to_vidx, range_spec); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void subm_strided_attention_with_tensor_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int 
*range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, x_max, y_max, z_max] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * x_max * y_max * z_max; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; int x_step = 0; int y_step = 0; int z_step = 0; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points if (z_offset == 0) { z_step = 1; } else { z_step = 2 * z_offset; } for (int sz_idx = z_idx - z_offset; sz_idx <= z_idx + z_offset; sz_idx += z_step){ if (sz_idx >= z_max || sz_idx < 0) continue; if (sz_idx >= z_max || sz_idx < 0) continue; if (y_offset == 0) { y_step = 1; } else { y_step = 2 * y_offset; } for (int sy_idx = y_idx - y_offset; sy_idx <= y_idx + y_offset; sy_idx += y_step){ if (sy_idx >= y_max || sy_idx < 0) continue; if (x_offset == 0) { x_step = 1; } else { x_step = 2 * x_offset; } for (int sx_idx = x_idx - x_offset; sx_idx <= x_idx + x_offset; sx_idx += x_step){ if (sx_idx >= x_max || sx_idx < 0) continue; int sv_idx = xyz_to_vidx[sx_idx * y_max * z_max + sy_idx * z_max + sz_idx]; if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void subm_strided_attention_with_tensor_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); subm_strided_attention_with_tensor_kernel<<<blocks, threads>>>(x_max, y_max, z_max, num_voxels, attend_size, num_range, attend_indices, v_indices, xyz_to_vidx, range_spec); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void subm_strided_attention_with_hash_kernel(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int hash_size, int 
*attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec) { /* attend_indices: [num_voxels, attend_size] for gather attend indices v_indices: [num_voxels, 4] bs + zyx indices of voxels xyz_to_vidx: [bs, hash_size, 2] voxel coordinates to voxel indices range_spec: [num_range, 3] half start/end range & stride */ int th_idx = blockIdx.x * blockDim.x + threadIdx.x; if (th_idx >= num_voxels) return; int bs_idx = v_indices[th_idx * 4 + 0]; int z_idx = v_indices[th_idx * 4 + 1]; int y_idx = v_indices[th_idx * 4 + 2]; int x_idx = v_indices[th_idx * 4 + 3]; if (x_idx >= x_max || x_idx < 0 || y_idx < 0 || y_idx >= y_max || z_idx < 0 || z_idx >= z_max) return; xyz_to_vidx += bs_idx * hash_size * 2; int num_samples = 0; for (int range_idx = 0; range_idx < num_range; ++range_idx) { int search_x_start_range = range_spec[range_idx * 9 + 0]; int search_x_end_range = range_spec[range_idx * 9 + 1]; int search_x_stride = range_spec[range_idx * 9 + 2]; int search_y_start_range = range_spec[range_idx * 9 + 3]; int search_y_end_range = range_spec[range_idx * 9 + 4]; int search_y_stride = range_spec[range_idx * 9 + 5]; int search_z_start_range = range_spec[range_idx * 9 + 6]; int search_z_end_range = range_spec[range_idx * 9 + 7]; int search_z_stride = range_spec[range_idx * 9 + 8]; int x_step = 0; int y_step = 0; int z_step = 0; for (int z_offset = 0; z_offset < search_z_end_range; z_offset += search_z_stride) { for (int y_offset = 0; y_offset < search_y_end_range; y_offset += search_y_stride) { for (int x_offset = 0; x_offset < search_x_end_range; x_offset += search_x_stride) { if ((x_offset < search_x_start_range) && (y_offset < search_y_start_range) && (z_offset < search_z_start_range)) { continue; } // each loop process 8 points if (z_offset == 0) { z_step = 1; } else { z_step = 2 * z_offset; } for (int sz_idx = z_idx - z_offset; sz_idx <= z_idx + z_offset; sz_idx += z_step){ if (sz_idx >= z_max || sz_idx < 0) continue; if (y_offset == 0) { y_step = 1; } else { y_step = 2 * y_offset; } for (int sy_idx = y_idx - y_offset; sy_idx <= y_idx + y_offset; sy_idx += y_step){ if (sy_idx >= y_max || sy_idx < 0) continue; if (x_offset == 0) { x_step = 1; } else { x_step = 2 * x_offset; } for (int sx_idx = x_idx - x_offset; sx_idx <= x_idx + x_offset; sx_idx += x_step){ if (sx_idx >= x_max || sx_idx < 0) continue; int skey = sx_idx * y_max * z_max + sy_idx * z_max + sz_idx; int sv_idx = hash_table_find(skey, hash_size, xyz_to_vidx); if (sv_idx != EMPTY_KEY) { // found non-empty index if (num_samples >= attend_size) return; // full and return attend_indices[th_idx * attend_size + num_samples] = sv_idx; num_samples++; }else { // not found ; } } } } } } } } return; } void subm_strided_attention_with_hash_kernel_launcher(int x_max, int y_max, int z_max, int num_voxels, int attend_size, int num_range, int hash_size, int *attend_indices, const int *v_indices, const int *xyz_to_vidx, const int *range_spec){ cudaError_t err; dim3 blocks(DIVUP(num_voxels, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); subm_strided_attention_with_hash_kernel<<<blocks, threads>>>(x_max, y_max, z_max, num_voxels, attend_size, num_range, hash_size, attend_indices, v_indices, xyz_to_vidx, range_spec); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
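Every neighbor lookup in the hash-table variants above goes through hash_table_find() on a per-batch [hash_size, 2] key/value table, keyed by the linearized voxel coordinate sx_idx * y_max * z_max + sy_idx * z_max + sz_idx, with EMPTY_KEY signalling a miss. The helper itself (and the EMPTY_KEY constant) is defined earlier in the project and does not appear in this excerpt, so the stand-alone sketch below is only a plausible stand-in -- a modulo hash with linear probing over the same [hash_size, 2] layout -- written to show how such a table can be queried on the device, not the project's actual implementation.

// hash_lookup_sketch.cu -- illustrative only; the EMPTY_KEY value, hash function and
// probing scheme are assumptions, not code from the file above.
#include <cstdio>
#include <cuda_runtime.h>

#define EMPTY_KEY (-1)  // assumed sentinel, mirroring the EMPTY_KEY checks above

// Table layout assumed to be [hash_size, 2]: slot s stores (key, voxel_index).
__device__ int hash_table_find_sketch(int key, int hash_size, const int *table) {
    int slot = key % hash_size;
    for (int probe = 0; probe < hash_size; ++probe) {
        int stored_key = table[slot * 2 + 0];
        if (stored_key == key) return table[slot * 2 + 1];  // hit: return voxel index
        if (stored_key == EMPTY_KEY) return EMPTY_KEY;      // empty slot: key absent
        slot = (slot + 1) % hash_size;                      // linear probing
    }
    return EMPTY_KEY;  // table full and key not present
}

__global__ void lookup_kernel(const int *table, int hash_size,
                              const int *keys, int *out, int num_keys) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_keys) out[i] = hash_table_find_sketch(keys[i], hash_size, table);
}

int main() {
    constexpr int hash_size = 8;
    // Host-side table: key 5 -> voxel index 42, every other slot empty.
    int h_table[hash_size * 2];
    for (int s = 0; s < hash_size; ++s) {
        h_table[s * 2 + 0] = EMPTY_KEY;
        h_table[s * 2 + 1] = EMPTY_KEY;
    }
    h_table[(5 % hash_size) * 2 + 0] = 5;
    h_table[(5 % hash_size) * 2 + 1] = 42;

    int h_keys[2] = {5, 7}, h_out[2];
    int *d_table, *d_keys, *d_out;
    cudaMalloc(&d_table, sizeof(h_table));
    cudaMalloc(&d_keys, sizeof(h_keys));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_table, h_table, sizeof(h_table), cudaMemcpyHostToDevice);
    cudaMemcpy(d_keys, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);

    lookup_kernel<<<1, 2>>>(d_table, hash_size, d_keys, d_out, 2);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("key 5 -> %d, key 7 -> %d\n", h_out[0], h_out[1]);  // expect 42, -1

    cudaFree(d_table); cudaFree(d_keys); cudaFree(d_out);
    return 0;
}

Open addressing with a fixed [hash_size, 2] buffer suits this use case: the table is one contiguous device allocation per batch element, lookups involve no pointer chasing, and a miss is detected as soon as an empty slot is probed.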
a4d4883b024d62700616ddcf1ba40d65a10e40a6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//# tKernel.in_.cu: simple function to test Kernel class
//# Copyright (C) 2013 ASTRON (Netherlands Institute for Radio Astronomy)
//# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
//#
//# This file is part of the LOFAR software suite.
//# The LOFAR software suite is free software: you can redistribute it and/or
//# modify it under the terms of the GNU General Public License as published
//# by the Free Software Foundation, either version 3 of the License, or
//# (at your option) any later version.
//#
//# The LOFAR software suite is distributed in the hope that it will be useful,
//# but WITHOUT ANY WARRANTY; without even the implied warranty of
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//# GNU General Public License for more details.
//#
//# You should have received a copy of the GNU General Public License along
//# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
//#
//# $Id: tKernel.in_.cu 24903 2013-05-14 23:50:58Z amesfoort $

extern "C" {

// test various "types" of args (for arg setting), esp. an immediate and a buffer
__global__ void testKernel(float *out, const float *in, size_t size, float inc)
{
  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;

  if (i < size) {
    out[i] = in[i] + inc;
  }
}

}
a4d4883b024d62700616ddcf1ba40d65a10e40a6.cu
//# tKernel.in_.cu: simple function to test Kernel class
//# Copyright (C) 2013 ASTRON (Netherlands Institute for Radio Astronomy)
//# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
//#
//# This file is part of the LOFAR software suite.
//# The LOFAR software suite is free software: you can redistribute it and/or
//# modify it under the terms of the GNU General Public License as published
//# by the Free Software Foundation, either version 3 of the License, or
//# (at your option) any later version.
//#
//# The LOFAR software suite is distributed in the hope that it will be useful,
//# but WITHOUT ANY WARRANTY; without even the implied warranty of
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
//# GNU General Public License for more details.
//#
//# You should have received a copy of the GNU General Public License along
//# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
//#
//# $Id: tKernel.in_.cu 24903 2013-05-14 23:50:58Z amesfoort $

extern "C" {

// test various "types" of args (for arg setting), esp. an immediate and a buffer
__global__ void testKernel(float *out, const float *in, size_t size, float inc)
{
  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;

  if (i < size) {
    out[i] = in[i] + inc;
  }
}

}
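The test kernel above just adds a scalar increment to each input element behind a bounds check; the LOFAR test framework that normally creates the buffers and launches it is not part of this file. The driver below is a hypothetical stand-alone host program, assumed to be compiled together with the kernel (e.g. appended to the same .cu file); the buffer size, launch configuration and printed check are illustrative choices, not values from the original test.

// Hypothetical host-side driver for testKernel; not part of the original file.
#include <cstdio>
#include <cuda_runtime.h>

extern "C" __global__ void testKernel(float *out, const float *in, size_t size, float inc);

int main() {
  const size_t size = 1024;   // assumed problem size
  const float inc = 1.5f;     // assumed increment

  float *h_in = new float[size], *h_out = new float[size];
  for (size_t i = 0; i < size; ++i) h_in[i] = static_cast<float>(i);

  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, size * sizeof(float));
  cudaMalloc(&d_out, size * sizeof(float));
  cudaMemcpy(d_in, h_in, size * sizeof(float), cudaMemcpyHostToDevice);

  // One thread per element; the kernel's i < size check handles the padded tail.
  const unsigned threads = 256;
  const unsigned blocks = static_cast<unsigned>((size + threads - 1) / threads);
  testKernel<<<blocks, threads>>>(d_out, d_in, size, inc);
  cudaDeviceSynchronize();

  cudaMemcpy(h_out, d_out, size * sizeof(float), cudaMemcpyDeviceToHost);
  printf("out[0] = %f, out[%zu] = %f (expected in[i] + %f)\n",
         h_out[0], size - 1, h_out[size - 1], inc);

  cudaFree(d_in); cudaFree(d_out);
  delete[] h_in; delete[] h_out;
  return 0;
}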
9f68bed9aebb51c3204a59b11b4da29b9f1b7cae.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; 
using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using 
AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // 
precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = 
cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using 
precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 
// Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif 
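Every generated benchmark in this file repeats the same skeleton: take N from state.range(0), build a cuasr::gemm::device::Srgemm from the threadblock/warp shapes named in the function, wrap it in cuasr::bench::device::BenchHarness, run it inside the state loop with a device synchronize per iteration, and report 2*N*N*N flops through an iteration-invariant counter. The cuASR harness itself is not reproduced in this excerpt, so the sketch below shows only that Google Benchmark plumbing on a trivial stand-in kernel; the kernel, problem size and flop count are illustrative assumptions rather than anything taken from this file.

// Minimal stand-in for the generated pattern above (CUDA runtime API used for brevity).
#include <benchmark/benchmark.h>
#include <cuda_runtime.h>

__global__ void axpy_kernel(int n, float a, const float *x, float *y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = a * x[i] + y[i];
}

static void BM_device_axpy(benchmark::State &state) {
  const int n = static_cast<int>(state.range(0));
  float *x, *y;
  cudaMalloc(&x, n * sizeof(float));
  cudaMalloc(&y, n * sizeof(float));

  // benchmark loop: launch, keep the result observable, wait for the device
  for (auto _ : state) {
    axpy_kernel<<<(n + 255) / 256, 256>>>(n, 2.0f, x, y);
    benchmark::DoNotOptimize(y);   // plays the role of DoNotOptimize(bench.run()) above
    cudaDeviceSynchronize();       // time the kernel itself, not just the launch
  }

  // 2 flops per element (one multiply, one add), reported as a per-iteration rate
  double flops_per_itr = 2.0 * n;
  state.counters["Flop/s"] =
      benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);

  cudaFree(x);
  cudaFree(y);
}
BENCHMARK(BM_device_axpy)->RangeMultiplier(2)->Range(256, 4096);
BENCHMARK_MAIN();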
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N 
* N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for 
(auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, 
cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = 
Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, 
precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape 
= cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using 
ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { const auto N = 
static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) 
static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / 
Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x32x1_8x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); 
#endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness 
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, 
InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, 
cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); hipDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
9f68bed9aebb51c3204a59b11b4da29b9f1b7cae.cu
/*************************************************************************************************** * Copyright (c) 2020, Vijay Thakkar ([email protected]). **************************************************************************************************/ ////////////////////////////////////////////////////////////////////// // THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY // ////////////////////////////////////////////////////////////////////// #include "benchmark/benchmark.h" #include "cuasr/gemm/device/default_srgemm_configuration.h" #include "cuasr/gemm/device/srgemm.h" #include "cuasr/functional.h" #include "harness.h" //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x32x1_2x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, 
// precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x32x1_4x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x64x1_4x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = 
Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x32x1_8x4_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 1 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x64x1_8x8_4x8_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, 
OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_64x32x1_8x8_8x4_1x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x32x8_8x16x1_2x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 8 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config 
= typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_8x64x8_8x32x1_2x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_16x16x1_4x2_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using 
WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_16x32x1_4x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 16 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x8_16x64x1_4x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = 
cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_32x16x1_4x4_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_32x32x1_8x4_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 1 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void 
BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_32x64x1_8x8_4x8_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 1 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_64x32x1_8x8_8x4_1x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 
// Warps / Block: 2 x 1 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x32x1_4x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x32x1_8x4_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif 
//////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 1 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x64x1_8x8_4x8_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 1 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } 
BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x32x1_8x8_8x4_2x1) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x32x8_8x16x1_2x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 16 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * 
N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x8_8x32x1_2x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x16x1_4x2_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop 
for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x32x1_4x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x64x1_4x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // 
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_32x16x1_4x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x32x1_8x4_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 2 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, 
cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x64x1_8x8_4x8_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_64x16x1_8x4_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = 
cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_64x32x1_8x8_8x4_2x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x64x16_8x16x1_2x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 16 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp 
= Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_16x128x16_8x32x1_2x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_16x8x1_2x2_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, 
precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x8_16x16x1_4x2_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x8_16x32x1_4x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 32 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using 
InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x256x8_16x64x1_4x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_32x16x1_4x4_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = 
cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_32x32x1_8x4_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 2 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_32x64x1_8x8_4x8_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 2 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) { 
const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_64x32x1_8x8_8x4_2x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 32 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x32x8_8x16x1_2x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and 
(CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x8_16x16x1_4x2_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x32x1_4x4_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / 
Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 128 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x8_32x16x1_4x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x32x1_8x4_4x8_4x2) 
->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 2 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x64x1_8x8_4x8_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 32 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, 
benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x32x8_64x16x1_8x4_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 8 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 2 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x32x1_8x8_8x4_4x2) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 64 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); 
cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x64x16_8x16x1_2x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 32 x 128 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_32x128x16_8x32x1_2x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 2 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 64 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness 
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x32x16_16x8x1_2x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x64x8_16x16x1_4x2_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, 
InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x128x8_16x32x1_4x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 8 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 64 x 256 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_64x256x8_16x64x1_4x8_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 2 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 32 x 16 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>; using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, 
cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x32x16_32x8x1_4x2_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 4 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 128 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x64x8_32x16x1_4x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 4 x 8 // Warps / Block: 4 x 4 // Threadblock: 128 x 128 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = 
Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_128x128x8_32x32x1_8x4_4x8_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif //////////////////////////////////////////////////////////////////////////////// // Elements / Thread: 8 x 4 // Threads / Warp: 8 x 4 // Warps / Block: 4 x 4 // Threadblock: 256 x 64 x 8 #if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2) static void BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) { const auto N = static_cast<int>(state.range(0)); using precision = float; using OpClass = cutlass::arch::OpClassSimt; using SmArch = cutlass::arch::Sm50; using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>; using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< // precision, precision, precision, precision, OpClass, // cuasr::minimum<precision>, cuasr::maximum<precision>, SmArch>; using AddOp = Config::AdditionOp; using MultOp = Config::MultiplicationOp; using EpilogueOutputOp = Config::EpilogueOutputOp; using Srgemm = cuasr::gemm::device::Srgemm< // AddOp, MultOp, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, cutlass::layout::RowMajor, // precision, OpClass, SmArch, // ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, // cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; // setup bench harness cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N }); // benchmark loop for (auto _ : state) { benchmark::DoNotOptimize(bench.run()); cudaDeviceSynchronize(); } double flops_per_itr = 2.0 * N * N * N; state.counters["Flop/s"] = benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate); } BENCHMARK(BM_SM50_device_minimum_maximum_ssrgemm_tt_t_256x64x8_64x16x1_8x4_8x4_4x4) ->RangeMultiplier(2)->Range(256, 4096); #endif
dacc96b4f3327a504c0a2072c1a8428dd21f356a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "relu_activation.hh"
#include "nn_exception.hh"

__global__ void reluActivationForward(float* Z, float* A,
                                      int Z_x_dim, int Z_y_dim) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    if (index < Z_x_dim * Z_y_dim) {
        A[index] = fmaxf(Z[index], 0);
    }
}

__global__ void reluActivationBackprop(float* Z, float* dA, float* dZ,
                                       int Z_x_dim, int Z_y_dim) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    if (index < Z_x_dim * Z_y_dim) {
        if (Z[index] > 0) {
            dZ[index] = dA[index];
        }
        else {
            dZ[index] = 0;
        }
    }
}

ReLUActivation::ReLUActivation(std::string name) {
    this->name = name;
}

ReLUActivation::~ReLUActivation() { }

Matrix& ReLUActivation::forward(Matrix& Z) {
    this->Z = Z;
    A.allocateMemoryIfNotAllocated(Z.shape);

    dim3 block_size(256);
    dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);

    hipLaunchKernelGGL(( reluActivationForward), dim3(num_of_blocks), dim3(block_size), 0, 0,
        Z.data.get(), A.data.get(), Z.shape.x, Z.shape.y);
    NNException::throwIfDeviceErrorsOccurred("Cannot perform ReLU forward propagation.");

    return A;
}

Matrix& ReLUActivation::backprop(Matrix& dA, float learning_rate) {
    dZ.allocateMemoryIfNotAllocated(Z.shape);

    dim3 block_size(256);
    dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x);

    hipLaunchKernelGGL(( reluActivationBackprop), dim3(num_of_blocks), dim3(block_size), 0, 0,
        Z.data.get(), dA.data.get(), dZ.data.get(), Z.shape.x, Z.shape.y);
    NNException::throwIfDeviceErrorsOccurred("Cannot perform ReLU back propagation");

    return dZ;
}
dacc96b4f3327a504c0a2072c1a8428dd21f356a.cu
#include "relu_activation.hh" #include "nn_exception.hh" __global__ void reluActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { A[index] = fmaxf(Z[index], 0); } } __global__ void reluActivationBackprop(float* Z, float* dA, float* dZ, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { if (Z[index] > 0) { dZ[index] = dA[index]; } else { dZ[index] = 0; } } } ReLUActivation::ReLUActivation(std::string name) { this->name = name; } ReLUActivation::~ReLUActivation() { } Matrix& ReLUActivation::forward(Matrix& Z) { this->Z = Z; A.allocateMemoryIfNotAllocated(Z.shape); dim3 block_size(256); dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x); reluActivationForward<<<num_of_blocks, block_size>>>(Z.data.get(), A.data.get(), Z.shape.x, Z.shape.y); NNException::throwIfDeviceErrorsOccurred("Cannot perform ReLU forward propagation."); return A; } Matrix& ReLUActivation::backprop(Matrix& dA, float learning_rate) { dZ.allocateMemoryIfNotAllocated(Z.shape); dim3 block_size(256); dim3 num_of_blocks((Z.shape.y * Z.shape.x + block_size.x - 1) / block_size.x); reluActivationBackprop<<<num_of_blocks, block_size>>>(Z.data.get(), dA.data.get(), dZ.data.get(), Z.shape.x, Z.shape.y); NNException::throwIfDeviceErrorsOccurred("Cannot perform ReLU back propagation"); return dZ; }
6d30569f70caad9267c868e265879b8008aca3ac.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce.h"

__device__ float merge(float old,float opOutput,float *extraParams) {
    return opOutput + old;
}

__device__ float update(float old,float opOutput,float *extraParams) {
    return opOutput + old;
}

__device__ float op(float d1,float *extraParams) {
    return fabsf(d1);
}

__device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *params,float *result) {
    return reduction;
}

extern "C"
__global__ void norm1_strided_float(int n, int xOffset,float *dx,int incx,float *params,float *result) {
    transform(n,xOffset,dx,incx,params,result);
}
6d30569f70caad9267c868e265879b8008aca3ac.cu
#include "reduce.h" __device__ float merge(float old,float opOutput,float *extraParams) { return opOutput + old; } __device__ float update(float old,float opOutput,float *extraParams) { return opOutput + old; } __device__ float op(float d1,float *extraParams) { return fabsf(d1); } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *params,float *result) { return reduction; } extern "C" __global__ void norm1_strided_float(int n, int xOffset,float *dx,int incx,float *params,float *result) { transform(n,xOffset,dx,incx,params,result); }
ee7258a7fae7cf8393abf767e018138118c837c7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.5.4) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date October 2020

       @generated from magmablas/zlarf.cu, normal z -> c, Thu Oct 8 23:05:33 2020
       @author Azzam Haidar

*/
#include "magma_internal.h"
#include "magma_templates.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define BLOCK_SIZEx  32
#define BLOCK_SIZEy  16

/******************************************************************************/
__global__
void magma_clarf_kernel(
    int m, const magmaFloatComplex *dv, const magmaFloatComplex *dtau,
    magmaFloatComplex *dc, int lddc )
{
    if ( !MAGMA_C_EQUAL(*dtau, MAGMA_C_ZERO) ) {
        const int tx = threadIdx.x;
        dc = dc + blockIdx.x * lddc;

        __shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
        magmaFloatComplex tmp;

        /* perform  w := v**H * C  */
        if (tx == 0)
            tmp = dc[0]; //since V[0] should be one
        else
            tmp = MAGMA_C_ZERO;

        for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
            tmp += MAGMA_C_MUL( MAGMA_C_CONJ( dv[j] ), dc[j] );
        }
        sum[tx] = tmp;
        magma_sum_reduce< BLOCK_SIZE >( tx, sum );

        /*  C := C - v * w  */
        __syncthreads();
        tmp = - MAGMA_C_CONJ(*dtau) * sum[0];
        for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
            dc[j] += tmp * dv[j];

        if (tx == 0) dc[0] += tmp;
    }
}

/******************************************************************************/
__global__
void magma_clarf_smkernel(
    int m, int n, magmaFloatComplex *dv, magmaFloatComplex *dtau,
    magmaFloatComplex *dc, int lddc )
{
    if ( ! MAGMA_C_EQUAL(*dtau, MAGMA_C_ZERO) ) {
        const int i = threadIdx.x, col = threadIdx.y;

        for( int k = col; k < n; k += BLOCK_SIZEy ) {
            dc = dc + k * lddc;

            __shared__ magmaFloatComplex sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
            magmaFloatComplex lsum;

            /*  w := v**H * C  */
            lsum = MAGMA_C_ZERO;
            for( int j = i; j < m; j += BLOCK_SIZEx ) {
                if (j == 0)
                    lsum += MAGMA_C_MUL( MAGMA_C_ONE, dc[j] );
                else
                    lsum += MAGMA_C_MUL( MAGMA_C_CONJ( dv[j] ), dc[j] );
            }
            sum[i][col] = lsum;
            magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );

            /*  C := C - v * w  */
            __syncthreads();
            magmaFloatComplex z__1 = - MAGMA_C_CONJ(*dtau) * sum[0][col];
            for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
                if (j == 0)
                    dc[j] += z__1;
                else
                    dc[j] += z__1 * dv[j];
            }
        }
    }
}

/******************************************************************************/
/*
    Apply a complex elementary reflector H to a complex M-by-N
    matrix C from the left. H is represented in the form
          H = I - tau * v * v**H
    where tau is a complex scalar and v is a complex vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H**H (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    This routine uses only one SM (block).
*/
extern "C" void
magma_clarf_sm(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex *dv, magmaFloatComplex *dtau,
    magmaFloatComplex *dc, magma_int_t lddc,
    magma_queue_t queue )
{
    dim3 blocks( 1 );
    dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );

    hipLaunchKernelGGL(( magma_clarf_smkernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
        m, n, dv, dtau, dc, lddc );
}

/***************************************************************************//**
    Apply a complex elementary reflector H to a complex M-by-N
    matrix C from the left. H is represented in the form
          H = I - tau * v * v**H
    where tau is a complex scalar and v is a complex vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H**H (the conjugate transpose of H), supply conjg(tau)
    instead tau.
*******************************************************************************/
extern "C" magma_int_t
magma_clarf_gpu(
    magma_int_t m,  magma_int_t n,
    magmaFloatComplex_const_ptr dv,
    magmaFloatComplex_const_ptr dtau,
    magmaFloatComplex_ptr dC,  magma_int_t lddc,
    magma_queue_t queue )
{
    dim3 grid( n, 1, 1 );
    dim3 threads( BLOCK_SIZE );
    if ( n > 0 ) {
        hipLaunchKernelGGL(( magma_clarf_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
            m, dv, dtau, dC, lddc);
    }

    // The computation can be done on 1 SM with the following routine.
    // magma_clarf_sm(m, n, dv, dtau, dc, lddc);

    return MAGMA_SUCCESS;
}
ee7258a7fae7cf8393abf767e018138118c837c7.cu
/*
    -- MAGMA (version 2.5.4) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date October 2020

       @generated from magmablas/zlarf.cu, normal z -> c, Thu Oct 8 23:05:33 2020
       @author Azzam Haidar

*/
#include "magma_internal.h"
#include "magma_templates.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16


/******************************************************************************/
__global__
void magma_clarf_kernel(
    int m, const magmaFloatComplex *dv, const magmaFloatComplex *dtau,
    magmaFloatComplex *dc, int lddc )
{
    if ( !MAGMA_C_EQUAL(*dtau, MAGMA_C_ZERO) ) {
        const int tx = threadIdx.x;
        dc = dc + blockIdx.x * lddc;

        __shared__ magmaFloatComplex sum[ BLOCK_SIZE ];
        magmaFloatComplex tmp;

        /* perform w := v**H * C */
        if (tx == 0)
            tmp = dc[0]; //since V[0] should be one
        else
            tmp = MAGMA_C_ZERO;
        for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
            tmp += MAGMA_C_MUL( MAGMA_C_CONJ( dv[j] ), dc[j] );
        }
        sum[tx] = tmp;
        magma_sum_reduce< BLOCK_SIZE >( tx, sum );

        /* C := C - v * w */
        __syncthreads();
        tmp = - MAGMA_C_CONJ(*dtau) * sum[0];
        for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
            dc[j] += tmp * dv[j];

        if (tx == 0) dc[0] += tmp;
    }
}


/******************************************************************************/
__global__
void magma_clarf_smkernel(
    int m, int n, magmaFloatComplex *dv, magmaFloatComplex *dtau,
    magmaFloatComplex *dc, int lddc )
{
    if ( ! MAGMA_C_EQUAL(*dtau, MAGMA_C_ZERO) ) {
        const int i = threadIdx.x, col= threadIdx.y;

        for( int k = col; k < n; k += BLOCK_SIZEy ) {
            dc = dc + k * lddc;

            __shared__ magmaFloatComplex sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
            magmaFloatComplex lsum;

            /* w := v**H * C */
            lsum = MAGMA_C_ZERO;
            for( int j = i; j < m; j += BLOCK_SIZEx ) {
                if (j == 0)
                    lsum += MAGMA_C_MUL( MAGMA_C_ONE, dc[j] );
                else
                    lsum += MAGMA_C_MUL( MAGMA_C_CONJ( dv[j] ), dc[j] );
            }
            sum[i][col] = lsum;
            magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );

            /* C := C - v * w */
            __syncthreads();
            magmaFloatComplex z__1 = - MAGMA_C_CONJ(*dtau) * sum[0][col];
            for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
                if (j == 0)
                    dc[j] += z__1;
                else
                    dc[j] += z__1 * dv[j];
            }
        }
    }
}


/******************************************************************************/
/*
    Apply a complex elementary reflector H to a complex M-by-N
    matrix C from the left. H is represented in the form

          H = I - tau * v * v**H

    where tau is a complex scalar and v is a complex vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H**H (the conjugate transpose of H), supply conjg(tau)
    instead tau.

    This routine uses only one SM (block).
*/
extern "C" void
magma_clarf_sm(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex *dv, magmaFloatComplex *dtau,
    magmaFloatComplex *dc, magma_int_t lddc,
    magma_queue_t queue )
{
    dim3 blocks( 1 );
    dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );

    magma_clarf_smkernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( m, n, dv, dtau, dc, lddc );
}


/***************************************************************************//**
    Apply a complex elementary reflector H to a complex M-by-N
    matrix C from the left. H is represented in the form

          H = I - tau * v * v**H

    where tau is a complex scalar and v is a complex vector.
    If tau = 0, then H is taken to be the unit matrix.

    To apply H**H (the conjugate transpose of H), supply conjg(tau)
    instead tau.
*******************************************************************************/
extern "C" magma_int_t
magma_clarf_gpu(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex_const_ptr dv,
    magmaFloatComplex_const_ptr dtau,
    magmaFloatComplex_ptr dC, magma_int_t lddc,
    magma_queue_t queue )
{
    dim3 grid( n, 1, 1 );
    dim3 threads( BLOCK_SIZE );
    if ( n > 0 ) {
        magma_clarf_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dv, dtau, dC, lddc);
    }

    // The computation can be done on 1 SM with the following routine.
    // magma_clarf_sm(m, n, dv, dtau, dc, lddc);

    return MAGMA_SUCCESS;
}
2453a5232f0157d78a90a142433e390a4e87cbbc.hip
// !!! This is a file automatically generated by hipify!!!
#include "cupy_cufftXt.h"

// this must define d_loadCallbackPtr
${dev_load_callback_ker}

// this must define d_storeCallbackPtr
${dev_store_callback_ker}

hipfftResult set_callback(hipfftHandle plan, cufftXtCallbackType type, bool cb_load,
                          void** callerInfo) {
    if (cb_load) {
        switch (type) {
            #ifdef HAS_LOAD_CALLBACK
            case CUFFT_CB_LD_COMPLEX: {
                cufftCallbackLoadC h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_LD_COMPLEX_DOUBLE: {
                cufftCallbackLoadZ h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_LD_REAL: {
                cufftCallbackLoadR h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_LD_REAL_DOUBLE: {
                cufftCallbackLoadD h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            #endif  // HAS_LOAD_CALLBACK
            default: {
                throw std::runtime_error("unrecognized callback");
            }
        }
    } else {
        switch (type) {
            #ifdef HAS_STORE_CALLBACK
            case CUFFT_CB_ST_COMPLEX: {
                cufftCallbackStoreC h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_ST_COMPLEX_DOUBLE: {
                cufftCallbackStoreZ h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_ST_REAL: {
                cufftCallbackStoreR h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            case CUFFT_CB_ST_REAL_DOUBLE: {
                cufftCallbackStoreD h_ptr;
                hipMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr));
                return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo);
            }
            #endif  // HAS_STORE_CALLBACK
            default: {
                throw std::runtime_error("unrecognized callback");
            }
        }
    }
}
2453a5232f0157d78a90a142433e390a4e87cbbc.cu
#include "cupy_cufftXt.h" // this must define d_loadCallbackPtr ${dev_load_callback_ker} // this must define d_storeCallbackPtr ${dev_store_callback_ker} cufftResult set_callback(cufftHandle plan, cufftXtCallbackType type, bool cb_load, void** callerInfo) { if (cb_load) { switch (type) { #ifdef HAS_LOAD_CALLBACK case CUFFT_CB_LD_COMPLEX: { cufftCallbackLoadC h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } case CUFFT_CB_LD_COMPLEX_DOUBLE: { cufftCallbackLoadZ h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } case CUFFT_CB_LD_REAL: { cufftCallbackLoadR h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } case CUFFT_CB_LD_REAL_DOUBLE: { cufftCallbackLoadD h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_loadCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } #endif // HAS_LOAD_CALLBACK default: { throw std::runtime_error("unrecognized callback"); } } } else { switch (type) { #ifdef HAS_STORE_CALLBACK case CUFFT_CB_ST_COMPLEX: { cufftCallbackStoreC h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } case CUFFT_CB_ST_COMPLEX_DOUBLE: { cufftCallbackStoreZ h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } case CUFFT_CB_ST_REAL: { cufftCallbackStoreR h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } case CUFFT_CB_ST_REAL_DOUBLE: { cufftCallbackStoreD h_ptr; cudaMemcpyFromSymbol(&h_ptr, d_storeCallbackPtr, sizeof(h_ptr)); return cufftXtSetCallback(plan, (void**)&h_ptr, type, callerInfo); } #endif // HAS_STORE_CALLBACK default: { throw std::runtime_error("unrecognized callback"); } } } }
e9a2f2effab850df73cf24eb652ac22cb0277465.hip
// !!! This is a file automatically generated by hipify!!!
#include <Aquila/core/detail/Export.hpp>
#include <Aquila/utilities/thrust/thrust_interop.hpp>
#include "Aquila/utilities/cuda/GPUSortingPriv.hpp"

namespace cv {
namespace cuda {
namespace detail {
    template AQUILA_EXPORTS void sortAscending<ushort>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortDescending<ushort>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortAscendingEachRow<ushort>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortDescendingEachRow<ushort>(cv::cuda::GpuMat&, hipStream_t);

    template AQUILA_EXPORTS void sortAscending<short>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortDescending<short>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortAscendingEachRow<short>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortDescendingEachRow<short>(cv::cuda::GpuMat&, hipStream_t);

    template AQUILA_EXPORTS void sortAscending<int>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortDescending<int>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortAscendingEachRow<int>(cv::cuda::GpuMat&, hipStream_t);
    template AQUILA_EXPORTS void sortDescendingEachRow<int>(cv::cuda::GpuMat&, hipStream_t);
}
}
}
e9a2f2effab850df73cf24eb652ac22cb0277465.cu
#include <Aquila/core/detail/Export.hpp>
#include <Aquila/utilities/thrust/thrust_interop.hpp>
#include "Aquila/utilities/cuda/GPUSortingPriv.hpp"

namespace cv {
namespace cuda {
namespace detail {
    template AQUILA_EXPORTS void sortAscending<ushort>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortDescending<ushort>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortAscendingEachRow<ushort>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortDescendingEachRow<ushort>(cv::cuda::GpuMat&, cudaStream_t);

    template AQUILA_EXPORTS void sortAscending<short>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortDescending<short>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortAscendingEachRow<short>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortDescendingEachRow<short>(cv::cuda::GpuMat&, cudaStream_t);

    template AQUILA_EXPORTS void sortAscending<int>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortDescending<int>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortAscendingEachRow<int>(cv::cuda::GpuMat&, cudaStream_t);
    template AQUILA_EXPORTS void sortDescendingEachRow<int>(cv::cuda::GpuMat&, cudaStream_t);
}
}
}
c8cbfef4279ccdad71677d6f33d2d228a8c7d58d.hip
// !!! This is a file automatically generated by hipify!!! #define __thrust_compiler_fence() __sync_synchronize() #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/array2d.h> #include <cusp/print.h> #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/CudaMatrix.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <thrust/sort.h> #include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp> #include <bcl/containers/experimental/cuda/algorithms/algorithm.hpp> #include <unordered_map> #include <chrono> #include <essl.h> int main(int argc, char** argv) { BCL::init(16); BCL::cuda::init(); using T = float; using index_type = int64_t; bool verify_result = false; std::string fname = std::string(argv[1]); // Number of vecs in SpMM (width of multi-vec, matrix) size_t num_vecs = std::atoi(argv[2]); auto matrix_shape = BCL::matrix_io::matrix_info(fname); size_t m = matrix_shape.shape[0]; size_t k = matrix_shape.shape[1]; size_t n = num_vecs; BCL::print("Choosing blocks...\n"); auto blocks = BCL::block_matmul(m, n, k); using allocator_type = BCL::cuda::bcl_allocator<T>; using indexing_type = BCL::cuda::RowMajorIndexing; BCL::print("Reading matrices...\n"); BCL::cuda::SPMatrix<T, index_type> a(fname, std::move(blocks[0])); BCL::cuda::Matrix<T, indexing_type> b(k, n, std::move(blocks[1])); BCL::cuda::Matrix<T, indexing_type> c(m, n, std::move(blocks[2])); b = 1; c = 0; BCL::cuda::barrier(); BCL::print("Info:\n"); if (BCL::rank() == 0) { printf("A:\n"); a.print_info(); printf("B:\n"); b.print_info(); printf("C:\n"); c.print_info(); } using queue_type = BCL::ChecksumQueue<BCL::cuda::CudaMatrix_ptr<T>, BCL::djb2_hash<BCL::cuda::CudaMatrix_ptr<T>>>; std::vector<queue_type> queues; for (size_t i = 0; i < BCL::nprocs(); i++) { queues.emplace_back(queue_type(i, a.grid_shape()[1]+8)); } hipsparseStatus_t status = hipsparseCreate(&BCL::cuda::bcl_cusparse_handle_); // printf("A taking %lf GB, B %lf GB\n", 1.0e-9*a.my_mem(), 1.0e-9*b.my_mem()); assert(a.grid_shape()[1] == b.grid_shape()[0]); auto ws_grid = generate_grid(a); BCL::cuda::barrier(); auto begin = std::chrono::high_resolution_clock::now(); BCL::cuda::gemm_workstealing(a, b, c, queues, ws_grid); BCL::cuda::barrier(); auto end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double>(end - begin).count(); double max_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::max<double>{}); double max_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::max<double>{}); double max_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::max<double>{}); double max_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::max<double>{}); double max_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::max<double>{}); double min_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::min<double>{}); double min_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::min<double>{}); double min_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::min<double>{}); double min_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::min<double>{}); double min_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::min<double>{}); BCL::cuda::duration_issue = BCL::allreduce(BCL::cuda::duration_issue, std::plus<double>{}); BCL::cuda::duration_sync = BCL::allreduce(BCL::cuda::duration_sync, std::plus<double>{}); BCL::cuda::duration_compute = 
BCL::allreduce(BCL::cuda::duration_compute, std::plus<double>{}); BCL::cuda::duration_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, std::plus<double>{}); BCL::cuda::duration_barrier = BCL::allreduce(BCL::cuda::duration_barrier, std::plus<double>{}); BCL::print("SpMM took %lf s\n", duration); if (BCL::rank() == 0) { printf("duration_issue %lf (%lf -> %lf)\n", BCL::cuda::duration_issue / BCL::nprocs(), min_issue, max_issue); printf("duration_sync %lf (%lf -> %lf)\n", BCL::cuda::duration_sync / BCL::nprocs(), min_sync, max_sync); printf("duration_compute %lf (%lf -> %lf)\n", BCL::cuda::duration_compute / BCL::nprocs(), min_compute, max_compute); printf("duration_accumulate %lf (%lf -> %lf)\n", BCL::cuda::duration_accumulate / BCL::nprocs(), min_accumulate, max_accumulate); printf("duration_barrier %lf (%lf -> %lf)\n", BCL::cuda::duration_barrier / BCL::nprocs(), min_barrier, max_barrier); } BCL::barrier(); fflush(stdout); BCL::barrier(); if (BCL::rank() == 0 && verify_result) { fprintf(stderr, "Reading in matrix...\n"); BCL::CSRMatrix<T, index_type> mat(fname); fprintf(stderr, "Copying to GPU...\n"); auto local_a = BCL::cuda::to_gpu<T, index_type, allocator_type>(mat); fprintf(stderr, "Creating local b...\n"); BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_b({k, n}); fprintf(stderr, "Creating local c...\n"); BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_c({m, n}); fprintf(stderr, "Writing to matrices...\n"); local_b = 1; local_c = 0; fprintf(stderr, "Doing local spmm...\n"); BCL::cuda::spmm_cusparse(local_a, local_b, local_c); hipDeviceSynchronize(); fprintf(stderr, "Getting C matrix...\n"); auto distributed_c = c.get_matrix(); std::vector<T> local_data(local_c.size()); hipMemcpy(local_data.data(), local_c.data(), sizeof(T)*local_c.size(), hipMemcpyDeviceToHost); assert(distributed_c.size() == local_c.size()); fprintf(stderr, "Checking accuracy...\n"); T eps = 1.0e-4; size_t matching = 0; bool print = true; for (size_t i = 0; i < c.shape()[0]; i++) { for (size_t j = 0; j < c.shape()[1]; j++) { size_t d_idx = i*c.shape()[1] + j; size_t l_idx = indexing_type().index(i, j, local_c.ld()); if (std::abs(distributed_c[d_idx] - local_data[l_idx]) > eps) { // assert(false); if (print) { printf("O(%lu, %lu) %2.2lf != %2.2lf\n", i, j, distributed_c[d_idx], local_data[l_idx]); } } else { if (print) { // printf("X %2.2lf == %2.2lf\n", distributed_c[d_idx], local_data[l_idx]); } matching++; } } if (print) { // printf("\n"); } } /* for (size_t i = 0; i < distributed_c.size(); i++) { if (std::abs(distributed_c[i] - local_data[i]) > eps) { // fprintf(stderr, "[%lu] %f != %f\n", i, distributed_c[i], local_data[i]); } else { matching++; } } */ printf("%lu / %lu (%lf%%) indices match.\n", matching, distributed_c.size(), 100 * ((double) matching) / distributed_c.size()); if (matching == distributed_c.size()) { printf("OK.\n"); } else { printf("***FAILED!***\n"); } } BCL::finalize(); return 0; }
c8cbfef4279ccdad71677d6f33d2d228a8c7d58d.cu
#define __thrust_compiler_fence() __sync_synchronize() #include <cusp/io/matrix_market.h> #include <cusp/csr_matrix.h> #include <cusp/array2d.h> #include <cusp/multiply.h> #include <cusp/array2d.h> #include <cusp/print.h> #include <bcl/bcl.hpp> #include <bcl/backends/experimental/nvshmem/backend.hpp> #include <bcl/containers/experimental/cuda/CudaMatrix.hpp> #include <bcl/containers/experimental/cuda/launch_kernel.cuh> #include <thrust/sort.h> #include <bcl/containers/experimental/cuda/CudaSPMatrix.hpp> #include <bcl/containers/experimental/cuda/algorithms/algorithm.hpp> #include <unordered_map> #include <chrono> #include <essl.h> int main(int argc, char** argv) { BCL::init(16); BCL::cuda::init(); using T = float; using index_type = int64_t; bool verify_result = false; std::string fname = std::string(argv[1]); // Number of vecs in SpMM (width of multi-vec, matrix) size_t num_vecs = std::atoi(argv[2]); auto matrix_shape = BCL::matrix_io::matrix_info(fname); size_t m = matrix_shape.shape[0]; size_t k = matrix_shape.shape[1]; size_t n = num_vecs; BCL::print("Choosing blocks...\n"); auto blocks = BCL::block_matmul(m, n, k); using allocator_type = BCL::cuda::bcl_allocator<T>; using indexing_type = BCL::cuda::RowMajorIndexing; BCL::print("Reading matrices...\n"); BCL::cuda::SPMatrix<T, index_type> a(fname, std::move(blocks[0])); BCL::cuda::Matrix<T, indexing_type> b(k, n, std::move(blocks[1])); BCL::cuda::Matrix<T, indexing_type> c(m, n, std::move(blocks[2])); b = 1; c = 0; BCL::cuda::barrier(); BCL::print("Info:\n"); if (BCL::rank() == 0) { printf("A:\n"); a.print_info(); printf("B:\n"); b.print_info(); printf("C:\n"); c.print_info(); } using queue_type = BCL::ChecksumQueue<BCL::cuda::CudaMatrix_ptr<T>, BCL::djb2_hash<BCL::cuda::CudaMatrix_ptr<T>>>; std::vector<queue_type> queues; for (size_t i = 0; i < BCL::nprocs(); i++) { queues.emplace_back(queue_type(i, a.grid_shape()[1]+8)); } cusparseStatus_t status = cusparseCreate(&BCL::cuda::bcl_cusparse_handle_); // printf("A taking %lf GB, B %lf GB\n", 1.0e-9*a.my_mem(), 1.0e-9*b.my_mem()); assert(a.grid_shape()[1] == b.grid_shape()[0]); auto ws_grid = generate_grid(a); BCL::cuda::barrier(); auto begin = std::chrono::high_resolution_clock::now(); BCL::cuda::gemm_workstealing(a, b, c, queues, ws_grid); BCL::cuda::barrier(); auto end = std::chrono::high_resolution_clock::now(); double duration = std::chrono::duration<double>(end - begin).count(); double max_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::max<double>{}); double max_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::max<double>{}); double max_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::max<double>{}); double max_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::max<double>{}); double max_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::max<double>{}); double min_issue = BCL::allreduce(BCL::cuda::duration_issue, BCL::min<double>{}); double min_sync = BCL::allreduce(BCL::cuda::duration_sync, BCL::min<double>{}); double min_compute = BCL::allreduce(BCL::cuda::duration_compute, BCL::min<double>{}); double min_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, BCL::min<double>{}); double min_barrier = BCL::allreduce(BCL::cuda::duration_barrier, BCL::min<double>{}); BCL::cuda::duration_issue = BCL::allreduce(BCL::cuda::duration_issue, std::plus<double>{}); BCL::cuda::duration_sync = BCL::allreduce(BCL::cuda::duration_sync, std::plus<double>{}); BCL::cuda::duration_compute = BCL::allreduce(BCL::cuda::duration_compute, 
std::plus<double>{}); BCL::cuda::duration_accumulate = BCL::allreduce(BCL::cuda::duration_accumulate, std::plus<double>{}); BCL::cuda::duration_barrier = BCL::allreduce(BCL::cuda::duration_barrier, std::plus<double>{}); BCL::print("SpMM took %lf s\n", duration); if (BCL::rank() == 0) { printf("duration_issue %lf (%lf -> %lf)\n", BCL::cuda::duration_issue / BCL::nprocs(), min_issue, max_issue); printf("duration_sync %lf (%lf -> %lf)\n", BCL::cuda::duration_sync / BCL::nprocs(), min_sync, max_sync); printf("duration_compute %lf (%lf -> %lf)\n", BCL::cuda::duration_compute / BCL::nprocs(), min_compute, max_compute); printf("duration_accumulate %lf (%lf -> %lf)\n", BCL::cuda::duration_accumulate / BCL::nprocs(), min_accumulate, max_accumulate); printf("duration_barrier %lf (%lf -> %lf)\n", BCL::cuda::duration_barrier / BCL::nprocs(), min_barrier, max_barrier); } BCL::barrier(); fflush(stdout); BCL::barrier(); if (BCL::rank() == 0 && verify_result) { fprintf(stderr, "Reading in matrix...\n"); BCL::CSRMatrix<T, index_type> mat(fname); fprintf(stderr, "Copying to GPU...\n"); auto local_a = BCL::cuda::to_gpu<T, index_type, allocator_type>(mat); fprintf(stderr, "Creating local b...\n"); BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_b({k, n}); fprintf(stderr, "Creating local c...\n"); BCL::cuda::CudaMatrix<T, allocator_type, indexing_type> local_c({m, n}); fprintf(stderr, "Writing to matrices...\n"); local_b = 1; local_c = 0; fprintf(stderr, "Doing local spmm...\n"); BCL::cuda::spmm_cusparse(local_a, local_b, local_c); cudaDeviceSynchronize(); fprintf(stderr, "Getting C matrix...\n"); auto distributed_c = c.get_matrix(); std::vector<T> local_data(local_c.size()); cudaMemcpy(local_data.data(), local_c.data(), sizeof(T)*local_c.size(), cudaMemcpyDeviceToHost); assert(distributed_c.size() == local_c.size()); fprintf(stderr, "Checking accuracy...\n"); T eps = 1.0e-4; size_t matching = 0; bool print = true; for (size_t i = 0; i < c.shape()[0]; i++) { for (size_t j = 0; j < c.shape()[1]; j++) { size_t d_idx = i*c.shape()[1] + j; size_t l_idx = indexing_type().index(i, j, local_c.ld()); if (std::abs(distributed_c[d_idx] - local_data[l_idx]) > eps) { // assert(false); if (print) { printf("O(%lu, %lu) %2.2lf != %2.2lf\n", i, j, distributed_c[d_idx], local_data[l_idx]); } } else { if (print) { // printf("X %2.2lf == %2.2lf\n", distributed_c[d_idx], local_data[l_idx]); } matching++; } } if (print) { // printf("\n"); } } /* for (size_t i = 0; i < distributed_c.size(); i++) { if (std::abs(distributed_c[i] - local_data[i]) > eps) { // fprintf(stderr, "[%lu] %f != %f\n", i, distributed_c[i], local_data[i]); } else { matching++; } } */ printf("%lu / %lu (%lf%%) indices match.\n", matching, distributed_c.size(), 100 * ((double) matching) / distributed_c.size()); if (matching == distributed_c.size()) { printf("OK.\n"); } else { printf("***FAILED!***\n"); } } BCL::finalize(); return 0; }
68e1d3ff90a0ac364a36711890349904a524bf5c.hip
// !!! This is a file automatically generated by hipify!!! // System includes #include <iostream> #include <string> #include <hip/hip_runtime.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include "plugin_factory.h" #include "cape.cuh" #include "cuda_helper.h" #include "metutil.h" #include "util.h" #include <NFmiGribPacking.h> #include "forecast_time.h" #include "level.h" #define HIMAN_AUXILIARY_INCLUDE #include "cache.h" #include "fetcher.h" #include "hitool.h" #undef HIMAN_AUXILIARY_INCLUDE using namespace himan; using namespace himan::plugin; himan::level cape_cuda::itsBottomLevel; const unsigned char FCAPE = (1 << 2); const unsigned char FCAPE3km = (1 << 0); extern double Max(const std::vector<double>& vec); template <typename T> __global__ void InitializeArrayKernel(T* d_arr, T val, size_t N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (; idx < N; idx += stride) { d_arr[idx] = val; } } template <typename T> void InitializeArray(T* d_arr, T val, size_t N, hipStream_t& stream) { const int blockSize = 128; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); hipLaunchKernelGGL(( InitializeArrayKernel<T>), dim3(gridSize), dim3(blockSize), 0, stream, d_arr, val, N); } template <typename T> __global__ void MultiplyWith(T* d_arr, T val, size_t N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (; idx < N; idx += stride) { d_arr[idx] = d_arr[idx] * val; } } template <typename T> void MultiplyWith(T* d_arr, T val, size_t N, hipStream_t& stream) { const int blockSize = 128; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); hipLaunchKernelGGL(( MultiplyWith<T>), dim3(gridSize), dim3(blockSize), 0, stream, d_arr, val, N); } info_simple* PrepareInfo(std::shared_ptr<himan::info> fullInfo, hipStream_t& stream) { auto h_info = fullInfo->ToSimple(); size_t N = h_info->size_x * h_info->size_y; assert(N > 0); // 1. Reserve memory at device for unpacked data double* d_arr = 0; CUDA_CHECK(hipMalloc(reinterpret_cast<double**>(&d_arr), N * sizeof(double))); // 2. 
Unpack if needed, leave data to device and simultaneously copy it back to cpu (himan cache) auto tempGrid = fullInfo->Grid(); if (tempGrid->IsPackedData()) { assert(tempGrid->PackedData().ClassName() == "simple_packed" || tempGrid->PackedData().ClassName() == "jpeg_packed"); assert(N > 0); assert(tempGrid->Data().Size() == N); double* arr = const_cast<double*>(tempGrid->Data().ValuesAsPOD()); CUDA_CHECK(hipHostRegister(reinterpret_cast<void*>(arr), sizeof(double) * N, 0)); assert(arr); tempGrid->PackedData().Unpack(d_arr, N, &stream); CUDA_CHECK(hipMemcpyAsync(arr, d_arr, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); tempGrid->PackedData().Clear(); auto c = GET_PLUGIN(cache); CUDA_CHECK(hipStreamSynchronize(stream)); c->Insert(*fullInfo); CUDA_CHECK(hipHostUnregister(arr)); h_info->packed_values = 0; } else { CUDA_CHECK( hipMemcpyAsync(d_arr, fullInfo->Data().ValuesAsPOD(), sizeof(double) * N, hipMemcpyHostToDevice, stream)); } h_info->values = d_arr; return h_info; } std::shared_ptr<himan::info> Fetch(const std::shared_ptr<const plugin_configuration> conf, const himan::forecast_time& theTime, const himan::level& theLevel, const himan::param& theParam, const himan::forecast_type& theType) { try { auto f = GET_PLUGIN(fetcher); return f->Fetch(conf, theTime, theLevel, theParam, theType, true); } catch (HPExceptionType& e) { if (e != kFileDataNotFound) { throw std::runtime_error("cape_cuda::Fetch(): Unable to proceed"); } return std::shared_ptr<info>(); } } __global__ void CopyLFCIteratorValuesKernel(double* __restrict__ d_Titer, const double* __restrict__ d_Tparcel, double* __restrict__ d_Piter, info_simple d_Penv) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Penv.size_x * d_Penv.size_y) { if (d_Tparcel[idx] != kFloatMissing && d_Penv.values[idx] != kFloatMissing) { d_Titer[idx] = d_Tparcel[idx]; d_Piter[idx] = d_Penv.values[idx]; } } } __global__ void LiftLCLKernel(const double* __restrict__ d_P, const double* __restrict__ d_T, const double* __restrict__ d_PLCL, info_simple d_Ptarget, double* __restrict__ d_Tparcel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Ptarget.size_x * d_Ptarget.size_y) { assert(d_P[idx] > 10); assert(d_P[idx] < 1500 || d_P[idx] == kFloatMissing); assert(d_Ptarget.values[idx] > 10); assert(d_Ptarget.values[idx] < 1500 || d_Ptarget.values[idx] == kFloatMissing); assert(d_T[idx] > 100); assert(d_T[idx] < 350 || d_T[idx] == kFloatMissing); double T = metutil::LiftLCL_(d_P[idx] * 100, d_T[idx], d_PLCL[idx] * 100, d_Ptarget.values[idx] * 100); assert(T > 100); assert(T < 350 || T == kFloatMissing); d_Tparcel[idx] = T; } } __global__ void MoistLiftKernel(const double* __restrict__ d_T, const double* __restrict__ d_P, info_simple d_Ptarget, double* __restrict__ d_Tparcel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T); assert(d_P); if (idx < d_Ptarget.size_x * d_Ptarget.size_y) { assert(d_P[idx] > 10); assert(d_P[idx] < 1500 || d_P[idx] == kFloatMissing); assert(d_Ptarget.values[idx] > 10); assert(d_Ptarget.values[idx] < 1500 || d_Ptarget.values[idx] == kFloatMissing); assert(d_T[idx] > 100); assert(d_T[idx] < 350 || d_T[idx] == kFloatMissing); double T = metutil::MoistLiftA_(d_P[idx] * 100, d_T[idx], d_Ptarget.values[idx] * 100); assert(T > 100); assert(T < 350 || T == kFloatMissing); d_Tparcel[idx] = T; } } __global__ void CAPEKernel(info_simple d_Tenv, info_simple d_Penv, info_simple d_Zenv, info_simple d_prevTenv, info_simple d_prevPenv, info_simple d_prevZenv, const double* __restrict d_Tparcel, const double* 
__restrict d_prevTparcel, const double* __restrict__ d_LFCT, const double* __restrict__ d_LFCP, double* __restrict__ d_CAPE, double* __restrict__ d_CAPE1040, double* __restrict__ d_CAPE3km, double* __restrict__ d_ELT, double* __restrict__ d_ELP, unsigned char* __restrict__ d_found, int d_curLevel, int d_breakLevel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Tenv.size_x * d_Tenv.size_y && d_found[idx] != 4) { double Tenv = d_Tenv.values[idx]; assert(Tenv > 100.); double Penv = d_Penv.values[idx]; // hPa assert(Penv < 1200.); double Zenv = d_Zenv.values[idx]; // m double prevTenv = d_prevTenv.values[idx]; // K assert(prevTenv > 100.); double prevPenv = d_prevPenv.values[idx]; // hPa assert(prevPenv < 1200.); double prevZenv = d_prevZenv.values[idx]; // m double Tparcel = d_Tparcel[idx]; // K assert(Tparcel > 100. || Tparcel == kFloatMissing); double prevTparcel = d_prevTparcel[idx]; // K assert(prevTparcel > 100. || Tparcel == kFloatMissing); double LFCP = d_LFCP[idx]; // hPa assert(LFCP < 1200.); double LFCT = d_LFCT[idx]; // K assert(LFCT > 100.); if (Penv == kFloatMissing || Tenv == kFloatMissing || Zenv == kFloatMissing || prevZenv == kFloatMissing || Tparcel == kFloatMissing || Penv > LFCP) { // Missing data or current grid point is below LFC return; } if (prevTparcel == kFloatMissing && Tparcel != kFloatMissing) { // When rising above LFC, get accurate value of Tenv at that level so that even small amounts of CAPE // (and EL!) values can be determined. prevTenv = himan::numerical_functions::interpolation::Linear(LFCP, prevPenv, Penv, prevTenv, Tenv); prevZenv = himan::numerical_functions::interpolation::Linear(LFCP, prevPenv, Penv, prevZenv, Zenv); prevPenv = LFCP; // LFC pressure prevTparcel = LFCT; // LFC temperature // If LFC was found close to lower hybrid level, the linear interpolation and moist lift will result // to same values. In this case CAPE integration fails as there is no area formed between environment // and parcel temperature. The result for this is that LFC is found but EL is not found. To prevent // this, warm the parcel value just slightly so that a miniscule CAPE area is formed and EL is found. if (fabs(prevTparcel - prevTenv) < 0.0001) { prevTparcel += 0.0001; } } if (d_curLevel < d_breakLevel && (Tenv - Tparcel) > 25.) { // Temperature gap between environment and parcel too large --> abort search. // Only for values higher in the atmosphere, to avoid the effects of inversion d_found[idx] |= FCAPE; } else { if (prevZenv >= 3000. && Zenv >= 3000.) 
{ d_found[idx] |= FCAPE3km; } if ((d_found[idx] & FCAPE3km) == 0) { double C = CAPE::CalcCAPE3km(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv); d_CAPE3km[idx] += C; assert(d_CAPE3km[idx] < 3000.); // 3000J/kg, not 3000m assert(d_CAPE3km[idx] >= 0); } double C = CAPE::CalcCAPE1040(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv); d_CAPE1040[idx] += C; assert(d_CAPE1040[idx] < 5000.); assert(d_CAPE1040[idx] >= 0); double CAPE, ELT, ELP; CAPE::CalcCAPE(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv, CAPE, ELT, ELP); d_CAPE[idx] += CAPE; assert(CAPE >= 0.); assert(d_CAPE[idx] < 8000); if (ELT != kFloatMissing) { d_ELT[idx] = ELT; d_ELP[idx] = ELP; } } } } __global__ void CINKernel(info_simple d_Tenv, info_simple d_prevTenv, info_simple d_Penv, info_simple d_prevPenv, info_simple d_Zenv, info_simple d_prevZenv, const double* __restrict__ d_Tparcel, const double* __restrict__ d_prevTparcel, const double* __restrict__ d_PLCL, const double* __restrict__ d_PLFC, const double* __restrict__ d_Psource, double* __restrict__ d_cinh, unsigned char* __restrict__ d_found) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Tenv.size_x * d_Tenv.size_y && d_found[idx] == 0) { double Tenv = d_Tenv.values[idx]; // K assert(Tenv >= 150.); const double prevTenv = d_prevTenv.values[idx]; double Penv = d_Penv.values[idx]; // hPa assert(Penv < 1200. || Penv == kFloatMissing); const double prevPenv = d_prevPenv.values[idx]; double Tparcel = d_Tparcel[idx]; // K assert(Tparcel >= 150. || Tparcel == kFloatMissing); const double prevTparcel = d_prevTparcel[idx]; double PLFC = d_PLFC[idx]; // hPa assert(PLFC < 1200. || PLFC == kFloatMissing); double PLCL = d_PLCL[idx]; // hPa assert(PLCL < 1200. || PLCL == kFloatMissing); double Zenv = d_Zenv.values[idx]; // m double prevZenv = d_prevZenv.values[idx]; // m // Make sure we have passed the starting level if (Penv <= d_Psource[idx]) { if (Penv <= PLFC) { // reached max height d_found[idx] = 1; // Integrate the final piece from previous level to LFC level if (prevTparcel == kFloatMissing || prevPenv == kFloatMissing || prevTenv == kFloatMissing) { Tparcel = kFloatMissing; // unable to proceed with CIN integration } else { // First get LFC height in meters Zenv = numerical_functions::interpolation::Linear(PLFC, prevPenv, Penv, prevZenv, Zenv); // LFC environment temperature value Tenv = numerical_functions::interpolation::Linear(PLFC, prevPenv, Penv, prevTenv, Tenv); // LFC T parcel value Tparcel = numerical_functions::interpolation::Linear(PLFC, prevPenv, Penv, prevTparcel, Tparcel); Penv = PLFC; assert(Zenv > prevZenv); } } if (Penv < PLCL && Tparcel != kFloatMissing) { // Above LCL, switch to virtual temperature Tparcel = metutil::VirtualTemperature_(Tparcel, Penv * 100); Tenv = metutil::VirtualTemperature_(Tenv, Penv * 100); } if (Tparcel != kFloatMissing) { d_cinh[idx] += CAPE::CalcCIN(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv); assert(d_cinh[idx] <= 0); } } } } __global__ void LFCKernel(info_simple d_T, info_simple d_P, info_simple d_prevT, info_simple d_prevP, double* __restrict__ d_Tparcel, const double* __restrict__ d_prevTparcel, const double* __restrict__ d_LCLT, const double* __restrict__ d_LCLP, double* __restrict__ d_LFCT, double* __restrict__ d_LFCP, unsigned char* __restrict__ d_found, int d_curLevel, int d_breakLevel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T.values); assert(d_P.values); if (idx < d_T.size_x * d_T.size_y && 
d_found[idx] == 0) { double Tparcel = d_Tparcel[idx]; double prevTparcel = d_prevTparcel[idx]; double Tenv = d_T.values[idx]; assert(Tenv < 350.); assert(Tenv > 100.); double prevTenv = d_prevT.values[idx]; assert(prevTenv < 350.); assert(prevTenv > 100.); double Penv = d_P.values[idx]; double prevPenv = d_prevP.values[idx]; double LCLP = d_LCLP[idx]; if (Tparcel != kFloatMissing && d_curLevel < d_breakLevel && (Tenv - Tparcel) > 30.) { // Temperature gap between environment and parcel too large --> abort search. // Only for values higher in the atmosphere, to avoid the effects of inversion d_found[idx] = 1; } if (Tparcel != kFloatMissing && Penv <= LCLP && Tparcel > Tenv && d_found[idx] == 0) { d_found[idx] = 1; if (prevTparcel == kFloatMissing) { prevTparcel = d_LCLT[idx]; // previous is LCL assert(d_LCLT[idx] != kFloatMissing); } if (fabs(prevTparcel - prevTenv) < 0.0001) { d_LFCT[idx] = Tparcel; d_LFCP[idx] = Penv; } else { auto intersection = CAPE::GetPointOfIntersection(point(Tenv, Penv), point(prevTenv, prevPenv), point(Tparcel, Penv), point(prevTparcel, prevPenv)); d_LFCT[idx] = intersection.X(); d_LFCP[idx] = intersection.Y(); if (d_LFCT[idx] == kFloatMissing) { // Intersection not found, use exact level value d_LFCT[idx] = Tenv; d_LFCP[idx] = Penv; } } assert(d_LFCT[idx] > 100); assert(d_LFCT[idx] < 350); } } } __global__ void ThetaEKernel(info_simple d_T, info_simple d_RH, info_simple d_P, info_simple d_prevT, info_simple d_prevRH, info_simple d_prevP, double* __restrict__ d_maxThetaE, double* __restrict__ d_Tresult, double* __restrict__ d_TDresult, double* __restrict__ d_Presult, unsigned char* __restrict__ d_found) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T.values); assert(d_RH.values); assert(d_P.values); if (idx < d_T.size_x * d_T.size_y && d_found[idx] == 0) { double T = d_T.values[idx]; double P = d_P.values[idx]; double RH = d_RH.values[idx]; if (P == kFloatMissing || T == kFloatMissing || RH == kFloatMissing) { d_found[idx] = 1; } else { if (P < 600.) 
{ // Cut search if reach level 600hPa // Linearly interpolate temperature and humidity values to 600hPa, to check // if highest theta e is found there T = numerical_functions::interpolation::Linear(600., P, d_prevP.values[idx], T, d_prevT.values[idx]); RH = numerical_functions::interpolation::Linear(600., P, d_prevP.values[idx], RH, d_prevRH.values[idx]); d_found[idx] = 1; // Make sure this is the last time we access this grid point P = 600.; } double TD = metutil::DewPointFromRH_(T, RH); double& refThetaE = d_maxThetaE[idx]; double ThetaE = metutil::smarttool::ThetaE_(T, RH, P * 100); if (ThetaE >= refThetaE) { refThetaE = ThetaE; d_Tresult[idx] = T; d_TDresult[idx] = TD; d_Presult[idx] = P; } } } } __global__ void MixingRatioKernel(const double* __restrict__ d_T, double* __restrict__ d_P, const double* __restrict__ d_RH, double* __restrict__ d_Tpot, double* __restrict__ d_MR, size_t N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T); assert(d_RH); assert(d_P); if (idx < N) { double T = d_T[idx]; double P = d_P[idx]; double RH = d_RH[idx]; assert((T > 150 && T < 350) || T == kFloatMissing); assert((P > 100 && P < 1500) || P == kFloatMissing); assert((RH >= 0 && RH < 102) || RH == kFloatMissing); if (T == kFloatMissing || P == kFloatMissing || RH == kFloatMissing) { d_P[idx] = kFloatMissing; } else { d_Tpot[idx] = metutil::Theta_(T, 100 * P); d_MR[idx] = metutil::smarttool::MixingRatio_(T, RH, 100 * P); d_P[idx] = P - 2.0; } } } __global__ void MixingRatioFinalizeKernel(double* __restrict__ d_T, double* __restrict__ d_TD, info_simple d_P, const double* __restrict__ d_Tpot, const double* __restrict__ d_MR, size_t N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T); assert(d_P.values); if (idx < N) { double P = d_P.values[idx]; double MR = d_MR[idx]; double Tpot = d_Tpot[idx]; assert((P > 100 && P < 1500) || P == kFloatMissing); if (Tpot != kFloatMissing && P != kFloatMissing) { d_T[idx] = Tpot * pow((P / 1000.), 0.2854); } double T = d_T[idx]; if (T != kFloatMissing && MR != kFloatMissing && P != kFloatMissing) { double Es = metutil::Es_(T); // Saturated water vapor pressure double E = metutil::E_(MR, 100 * P); double RH = E / Es * 100; d_TD[idx] = metutil::DewPointFromRH_(T, RH); } } } cape_source cape_cuda::GetHighestThetaEValuesGPU(const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo) { himan::level curLevel = itsBottomLevel; const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 
0 : 1); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); double* d_maxThetaE = 0; double* d_Tresult = 0; double* d_TDresult = 0; double* d_Presult = 0; unsigned char* d_found = 0; CUDA_CHECK(hipMalloc((double**)&d_maxThetaE, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Tresult, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_TDresult, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Presult, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_found, sizeof(unsigned char) * N)); InitializeArray<double>(d_maxThetaE, -1, N, stream); InitializeArray<double>(d_Tresult, kFloatMissing, N, stream); InitializeArray<double>(d_TDresult, kFloatMissing, N, stream); InitializeArray<double>(d_Presult, kFloatMissing, N, stream); InitializeArray<unsigned char>(d_found, 0, N, stream); info_simple* h_prevT = 0; info_simple* h_prevP = 0; info_simple* h_prevRH = 0; while (true) { auto TInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto RHInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("RH-PRCNT"), myTargetInfo->ForecastType()); auto PInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); if (!TInfo || !RHInfo || !PInfo) { return std::make_tuple(std::vector<double>(), std::vector<double>(), std::vector<double>()); } auto h_T = PrepareInfo(TInfo, stream); auto h_P = PrepareInfo(PInfo, stream); auto h_RH = PrepareInfo(RHInfo, stream); assert(h_T->values); assert(h_RH->values); assert(h_P->values); bool release = true; if (!h_prevT) { // first time h_prevT = new info_simple(*h_T); h_prevP = new info_simple(*h_P); h_prevRH = new info_simple(*h_RH); release = false; } hipLaunchKernelGGL(( ThetaEKernel), dim3(gridSize), dim3(blockSize), 0, stream, *h_T, *h_RH, *h_P, *h_prevT, *h_prevRH, *h_prevP, d_maxThetaE, d_Tresult, d_TDresult, d_Presult, d_found); std::vector<unsigned char> found(N, 0); CUDA_CHECK(hipMemcpyAsync(&found[0], d_found, sizeof(unsigned char) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); if (release) { CUDA_CHECK(hipFree(h_prevP->values)); CUDA_CHECK(hipFree(h_prevRH->values)); CUDA_CHECK(hipFree(h_prevT->values)); } delete h_prevP; delete h_prevT; delete h_prevRH; h_prevP = h_P; h_prevRH = h_RH; h_prevT = h_T; curLevel.Value(curLevel.Value() - 1); size_t foundCount = std::count(found.begin(), found.end(), 1); if (foundCount == found.size()) break; } CUDA_CHECK(hipFree(h_prevP->values)); CUDA_CHECK(hipFree(h_prevRH->values)); CUDA_CHECK(hipFree(h_prevT->values)); delete h_prevP; delete h_prevT; delete h_prevRH; std::vector<double> Tthetae(myTargetInfo->Data().Size()); std::vector<double> TDthetae(myTargetInfo->Data().Size()); std::vector<double> Pthetae(myTargetInfo->Data().Size()); CUDA_CHECK(hipMemcpyAsync(&Tthetae[0], d_Tresult, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&TDthetae[0], d_TDresult, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&Pthetae[0], d_Presult, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipFree(d_maxThetaE)); CUDA_CHECK(hipFree(d_Tresult)); CUDA_CHECK(hipFree(d_TDresult)); CUDA_CHECK(hipFree(d_Presult)); CUDA_CHECK(hipFree(d_found)); CUDA_CHECK(hipStreamDestroy(stream)); return std::make_tuple(Tthetae, TDthetae, Pthetae); } cape_source cape_cuda::Get500mMixingRatioValuesGPU(std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo) { const size_t N = 
myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); level curLevel = itsBottomLevel; auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); modifier_mean tp, mr; tp.HeightInMeters(false); mr.HeightInMeters(false); auto f = GET_PLUGIN(fetcher); auto PInfo = f->Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType(), false); if (!PInfo) { return std::make_tuple(std::vector<double>(), std::vector<double>(), std::vector<double>()); } else { // Himan specialty: empty data grid size_t miss = 0; for (auto& val : VEC(PInfo)) { if (val == kFloatMissing) miss++; } if (PInfo->Data().MissingCount() == PInfo->Data().Size()) { return std::make_tuple(std::vector<double>(), std::vector<double>(), std::vector<double>()); } } auto PVec = VEC(PInfo); auto P500m = h->VerticalValue(param("P-HPA"), 500.); h->HeightUnit(kHPa); tp.LowerHeight(PVec); mr.LowerHeight(PVec); tp.UpperHeight(P500m); mr.UpperHeight(P500m); double* d_Tpot = 0; double* d_MR = 0; double* d_T = 0; double* d_RH = 0; double* d_P = 0; double* d_TD = 0; CUDA_CHECK(hipMalloc((double**)&d_Tpot, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_MR, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_T, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_RH, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_P, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_TD, N * sizeof(double))); InitializeArray<double>(d_Tpot, kFloatMissing, N, stream); InitializeArray<double>(d_MR, kFloatMissing, N, stream); while (true) { auto TVec = h->VerticalValue(param("T-K"), PVec); auto RHVec = h->VerticalValue(param("RH-PRCNT"), PVec); CUDA_CHECK(hipMemcpyAsync(d_T, &TVec[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_RH, &RHVec[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_P, &PVec[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); hipLaunchKernelGGL(( MixingRatioKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_T, d_P, d_RH, d_Tpot, d_MR, N); std::vector<double> Tpot(N, kFloatMissing); std::vector<double> MR(N, kFloatMissing); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipMemcpyAsync(&Tpot[0], d_Tpot, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&MR[0], d_MR, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); tp.Process(Tpot, PVec); mr.Process(MR, PVec); size_t foundCount = tp.HeightsCrossed(); assert(tp.HeightsCrossed() == mr.HeightsCrossed()); if (foundCount == N) { break; } CUDA_CHECK(hipMemcpyAsync(&PVec[0], d_P, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); } CUDA_CHECK(hipStreamSynchronize(stream)); // Calculate averages auto Tpot = tp.Result(); auto MR = mr.Result(); // Copy averages to GPU for final calculation CUDA_CHECK(hipMemcpyAsync(d_Tpot, &Tpot[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_MR, &MR[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); auto Psurf = Fetch(conf, myTargetInfo->Time(), itsBottomLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto h_P = PrepareInfo(Psurf, stream); InitializeArray<double>(d_T, kFloatMissing, N, stream); InitializeArray<double>(d_TD, kFloatMissing, N, stream); std::vector<double> T(Tpot.size(), kFloatMissing); 
std::vector<double> TD(T.size(), kFloatMissing); hipLaunchKernelGGL(( MixingRatioFinalizeKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_T, d_TD, *h_P, d_Tpot, d_MR, N); CUDA_CHECK(hipMemcpyAsync(&T[0], d_T, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&TD[0], d_TD, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipFree(d_Tpot)); CUDA_CHECK(hipFree(d_MR)); CUDA_CHECK(hipFree(d_RH)); CUDA_CHECK(hipFree(d_P)); CUDA_CHECK(hipFree(d_T)); CUDA_CHECK(hipFree(d_TD)); CUDA_CHECK(hipStreamDestroy(stream)); return std::make_tuple(T, TD, VEC(Psurf)); } std::pair<std::vector<double>, std::vector<double>> cape_cuda::GetLFCGPU( const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo, std::vector<double>& T, std::vector<double>& P, std::vector<double>& TenvLCL) { auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); h->HeightUnit(kHPa); const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); double* d_TenvLCL = 0; double* d_Titer = 0; double* d_Piter = 0; double* d_LCLP = 0; double* d_LCLT = 0; double* d_LFCT = 0; double* d_LFCP = 0; double* d_Tparcel = 0; double* d_prevTparcel = 0; unsigned char* d_found = 0; CUDA_CHECK(hipMalloc((double**)&d_TenvLCL, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Piter, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Titer, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_LCLT, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_LCLP, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_LFCT, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_LFCP, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_found, sizeof(unsigned char) * N)); CUDA_CHECK(hipMalloc((double**)&d_Tparcel, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_prevTparcel, sizeof(double) * N)); CUDA_CHECK(hipMemcpyAsync(d_TenvLCL, &TenvLCL[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_Titer, &T[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_Piter, &P[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_LCLT, d_Titer, sizeof(double) * N, hipMemcpyDeviceToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_LCLP, d_Piter, sizeof(double) * N, hipMemcpyDeviceToDevice, stream)); InitializeArray<double>(d_LFCT, kFloatMissing, N, stream); InitializeArray<double>(d_LFCP, kFloatMissing, N, stream); InitializeArray<double>(d_prevTparcel, kFloatMissing, N, stream); InitializeArray<unsigned char>(d_found, 0, N, stream); // For each grid point find the hybrid level that's below LCL and then pick the lowest level // among all grid points; most commonly it's the lowest hybrid level auto levels = h->LevelForHeight(myTargetInfo->Producer(), ::Max(P)); level curLevel = levels.first; auto prevPenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto prevTenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto h_prevTenv = PrepareInfo(prevTenvInfo, stream); auto h_prevPenv = PrepareInfo(prevPenvInfo, stream); assert(h_prevTenv->values); assert(h_prevPenv->values); curLevel.Value(curLevel.Value() - 1); std::vector<unsigned char> 
found(N, 0); std::vector<double> LFCT(N, kFloatMissing); std::vector<double> LFCP(N, kFloatMissing); for (size_t i = 0; i < N; i++) { if ((T[i] - TenvLCL[i]) > 0.001) { found[i] = 1; LFCT[i] = T[i]; LFCP[i] = P[i]; } } CUDA_CHECK(hipMemcpyAsync(d_found, &found[0], sizeof(unsigned char) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_LFCT, &LFCT[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_LFCP, &LFCP[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); auto hPa450 = h->LevelForHeight(myTargetInfo->Producer(), 450.); auto hPa150 = h->LevelForHeight(myTargetInfo->Producer(), 150.); while (curLevel.Value() > hPa150.first.Value()) { // Get environment temperature and pressure values for this level auto TenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto PenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto h_Penv = PrepareInfo(PenvInfo, stream); auto h_Tenv = PrepareInfo(TenvInfo, stream); // Lift the particle from previous level to this level. In the first revolution // of this loop the starting level is LCL. If target level level is below current level // (ie. we would be lowering the particle) missing value is returned. hipLaunchKernelGGL(( MoistLiftKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_Titer, d_Piter, *h_Penv, d_Tparcel); hipLaunchKernelGGL(( LFCKernel), dim3(gridSize), dim3(blockSize), 0, stream, *h_Tenv, *h_Penv, *h_prevTenv, *h_prevPenv, d_Tparcel, d_prevTparcel, d_LCLT, d_LCLP, d_LFCT, d_LFCP, d_found, curLevel.Value(), hPa450.first.Value()); CUDA_CHECK(hipMemcpyAsync(&found[0], d_found, sizeof(unsigned char) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipFree(h_prevPenv->values)); CUDA_CHECK(hipFree(h_prevTenv->values)); delete h_prevPenv; delete h_prevTenv; h_prevPenv = h_Penv; h_prevTenv = h_Tenv; CUDA_CHECK(hipStreamSynchronize(stream)); if (static_cast<size_t>(std::count(found.begin(), found.end(), 1)) == found.size()) break; CUDA_CHECK(hipMemcpyAsync(d_prevTparcel, d_Tparcel, sizeof(double) * N, hipMemcpyDeviceToDevice, stream)); curLevel.Value(curLevel.Value() - 1); } CUDA_CHECK(hipFree(h_prevPenv->values)); CUDA_CHECK(hipFree(h_prevTenv->values)); delete h_prevPenv; delete h_prevTenv; CUDA_CHECK(hipMemcpyAsync(&LFCT[0], d_LFCT, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&LFCP[0], d_LFCP, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipFree(d_LFCT)); CUDA_CHECK(hipFree(d_LFCP)); CUDA_CHECK(hipFree(d_LCLT)); CUDA_CHECK(hipFree(d_LCLP)); CUDA_CHECK(hipFree(d_Tparcel)); CUDA_CHECK(hipFree(d_prevTparcel)); CUDA_CHECK(hipFree(d_found)); CUDA_CHECK(hipFree(d_Titer)); CUDA_CHECK(hipFree(d_Piter)); CUDA_CHECK(hipFree(d_TenvLCL)); CUDA_CHECK(hipStreamDestroy(stream)); return std::make_pair(LFCT, LFCP); } void cape_cuda::GetCINGPU(const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo, const std::vector<double>& Tsource, const std::vector<double>& Psource, const std::vector<double>& TLCL, const std::vector<double>& PLCL, const std::vector<double>& PLFC, param CINParam) { const params PParams({param("PGR-PA"), param("P-PA")}); auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); h->HeightUnit(kHPa); forecast_time ftime = myTargetInfo->Time(); 
forecast_type ftype = myTargetInfo->ForecastType(); /* * Modus operandi: * * 1. Integrate from ground to LCL dry adiabatically * * This can be done always since LCL is known at all grid points * (that have source data values defined). * * 2. Integrate from LCL to LFC moist adiabatically * * Note! For some points integration will fail (no LFC found) * * We stop integrating at first time CAPE area is found! */ // Get LCL and LFC heights in meters auto ZLCL = h->VerticalValue(param("HL-M"), PLCL); auto ZLFC = h->VerticalValue(param("HL-M"), PLFC); level curLevel = itsBottomLevel; auto prevZenvInfo = Fetch(conf, ftime, curLevel, param("HL-M"), ftype); auto prevTenvInfo = Fetch(conf, ftime, curLevel, param("T-K"), ftype); auto prevPenvInfo = Fetch(conf, ftime, curLevel, param("P-HPA"), ftype); const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); auto h_prevZenv = PrepareInfo(prevZenvInfo, stream); auto h_prevTenv = PrepareInfo(prevTenvInfo, stream); auto h_prevPenv = PrepareInfo(prevPenvInfo, stream); double* d_Psource = 0; double* d_Tparcel = 0; double* d_prevTparcel = 0; double* d_Piter = 0; double* d_prevPiter = 0; double* d_Titer = 0; double* d_prevTiter = 0; double* d_PLCL = 0; double* d_PLFC = 0; double* d_cinh = 0; unsigned char* d_found = 0; CUDA_CHECK(hipMalloc((double**)&d_Psource, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_Tparcel, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_prevTparcel, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_Piter, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_Titer, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_PLCL, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_PLFC, N * sizeof(double))); CUDA_CHECK(hipMalloc((double**)&d_cinh, N * sizeof(double))); CUDA_CHECK(hipMalloc((unsigned char**)&d_found, N * sizeof(unsigned char))); InitializeArray<double>(d_cinh, 0., N, stream); InitializeArray<double>(d_Tparcel, kFloatMissing, N, stream); CUDA_CHECK(hipMemcpyAsync(d_prevTparcel, &Tsource[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_Psource, &Psource[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_Titer, &Tsource[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_Piter, &Psource[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_PLCL, &PLCL[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_PLFC, &PLFC[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); std::vector<unsigned char> found(N, 0); for (size_t i = 0; i < PLFC.size(); i++) { if (PLFC[i] == kFloatMissing) found[i] = true; } CUDA_CHECK(hipMemcpyAsync(d_found, &found[0], sizeof(unsigned char) * N, hipMemcpyHostToDevice, stream)); curLevel.Value(curLevel.Value() - 1); auto hPa100 = h->LevelForHeight(myTargetInfo->Producer(), 100.); while (curLevel.Value() > hPa100.first.Value()) { auto ZenvInfo = Fetch(conf, ftime, curLevel, param("HL-M"), ftype); auto TenvInfo = Fetch(conf, ftime, curLevel, param("T-K"), ftype); auto PenvInfo = Fetch(conf, ftime, curLevel, param("P-HPA"), ftype); auto h_Zenv = PrepareInfo(ZenvInfo, stream); auto h_Penv = PrepareInfo(PenvInfo, stream); auto h_Tenv = PrepareInfo(TenvInfo, stream); hipLaunchKernelGGL(( LiftLCLKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_Piter, d_Titer, d_PLCL, 
*h_Penv, d_Tparcel); hipLaunchKernelGGL(( CINKernel), dim3(gridSize), dim3(blockSize), 0, stream, *h_Tenv, *h_prevTenv, *h_Penv, *h_prevPenv, *h_Zenv, *h_prevZenv, d_Tparcel, d_prevTparcel, d_PLCL, d_PLFC, d_Psource, d_cinh, d_found); CUDA_CHECK(hipMemcpyAsync(&found[0], d_found, sizeof(unsigned char) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(d_prevTparcel, d_Tparcel, sizeof(double) * N, hipMemcpyDeviceToDevice, stream)); CUDA_CHECK(hipFree(h_prevPenv->values)); CUDA_CHECK(hipFree(h_prevTenv->values)); CUDA_CHECK(hipFree(h_prevZenv->values)); delete h_prevPenv; delete h_prevTenv; delete h_prevZenv; h_prevPenv = h_Penv; h_prevTenv = h_Tenv; h_prevZenv = h_Zenv; CUDA_CHECK(hipStreamSynchronize(stream)); if (static_cast<size_t>(std::count(found.begin(), found.end(), 1)) == found.size()) break; // preserve starting position for those grid points that have value hipLaunchKernelGGL(( CopyLFCIteratorValuesKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_Titer, d_Tparcel, d_Piter, *h_Penv); curLevel.Value(curLevel.Value() - 1); } CUDA_CHECK(hipFree(h_prevPenv->values)); CUDA_CHECK(hipFree(h_prevTenv->values)); CUDA_CHECK(hipFree(h_prevZenv->values)); delete h_prevPenv; delete h_prevTenv; delete h_prevZenv; std::vector<double> cinh(N, 0); CUDA_CHECK(hipMemcpyAsync(&cinh[0], d_cinh, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipFree(d_cinh)); CUDA_CHECK(hipFree(d_Psource)); CUDA_CHECK(hipFree(d_Tparcel)); CUDA_CHECK(hipFree(d_prevTparcel)); CUDA_CHECK(hipFree(d_Piter)); CUDA_CHECK(hipFree(d_prevPiter)); CUDA_CHECK(hipFree(d_Titer)); CUDA_CHECK(hipFree(d_prevTiter)); CUDA_CHECK(hipFree(d_PLCL)); CUDA_CHECK(hipFree(d_PLFC)); CUDA_CHECK(hipFree(d_found)); CUDA_CHECK(hipStreamDestroy(stream)); myTargetInfo->Param(CINParam); myTargetInfo->Data().Set(cinh); } void cape_cuda::GetCAPEGPU(const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo, const std::vector<double>& T, const std::vector<double>& P, param ELTParam, param ELPParam, param CAPEParam, param CAPE1040Param, param CAPE3kmParam) { assert(T.size() == P.size()); auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); h->HeightUnit(kHPa); // Found count determines if we have calculated all three CAPE variation for a single grid point std::vector<unsigned char> found(T.size(), 0); // No LFC --> No CAPE for (size_t i = 0; i < P.size(); i++) { if (P[i] == kFloatMissing) { found[i] |= FCAPE; } } const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 
0 : 1); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); double* d_CAPE = 0; double* d_CAPE1040 = 0; double* d_CAPE3km = 0; double* d_ELT = 0; double* d_ELP = 0; double* d_Titer = 0; double* d_Piter = 0; double* d_prevTparcel = 0; double* d_Tparcel = 0; double* d_LFCT = 0; double* d_LFCP = 0; unsigned char* d_found = 0; CUDA_CHECK(hipMalloc((double**)&d_CAPE, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_CAPE1040, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_CAPE3km, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_ELP, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_ELT, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Piter, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Titer, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_Tparcel, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_prevTparcel, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_LFCT, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_LFCP, sizeof(double) * N)); CUDA_CHECK(hipMalloc((double**)&d_found, sizeof(unsigned char) * N)); InitializeArray<unsigned char>(d_found, 0, N, stream); CUDA_CHECK(hipMemcpyAsync(d_Titer, &T[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_prevTparcel, d_Titer, sizeof(double) * N, hipMemcpyDeviceToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_Piter, &P[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_LFCT, &T[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_LFCP, &P[0], sizeof(double) * N, hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_found, &found[0], sizeof(unsigned char) * N, hipMemcpyHostToDevice, stream)); InitializeArray<double>(d_CAPE, 0., N, stream); InitializeArray<double>(d_CAPE1040, 0., N, stream); InitializeArray<double>(d_CAPE3km, 0., N, stream); InitializeArray<double>(d_ELP, kFloatMissing, N, stream); InitializeArray<double>(d_ELT, kFloatMissing, N, stream); // For each grid point find the hybrid level that's below LFC and then pick the lowest level // among all grid points auto levels = h->LevelForHeight(myTargetInfo->Producer(), ::Max(P)); level curLevel = levels.first; auto prevZenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("HL-M"), myTargetInfo->ForecastType()); auto prevTenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto prevPenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto h_prevZenv = PrepareInfo(prevZenvInfo, stream); auto h_prevPenv = PrepareInfo(prevPenvInfo, stream); auto h_prevTenv = PrepareInfo(prevTenvInfo, stream); curLevel.Value(curLevel.Value()); auto hPa100 = h->LevelForHeight(myTargetInfo->Producer(), 100.); while (curLevel.Value() > hPa100.first.Value()) { auto PenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto TenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto ZenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("HL-M"), myTargetInfo->ForecastType()); auto h_Zenv = PrepareInfo(ZenvInfo, stream); auto h_Penv = PrepareInfo(PenvInfo, stream); auto h_Tenv = PrepareInfo(TenvInfo, stream); hipLaunchKernelGGL(( MoistLiftKernel), dim3(gridSize), dim3(blockSize), 0, stream, d_Titer, d_Piter, *h_Penv, d_Tparcel); hipLaunchKernelGGL(( CAPEKernel), dim3(gridSize), dim3(blockSize), 0, stream, *h_Tenv, *h_Penv, *h_Zenv, 
*h_prevTenv, *h_prevPenv, *h_prevZenv, d_Tparcel, d_prevTparcel, d_LFCT, d_LFCP, d_CAPE, d_CAPE1040, d_CAPE3km, d_ELT, d_ELP, d_found, curLevel.Value(), hPa100.first.Value()); CUDA_CHECK(hipFree(h_prevZenv->values)); CUDA_CHECK(hipFree(h_prevTenv->values)); CUDA_CHECK(hipFree(h_prevPenv->values)); CUDA_CHECK(hipMemcpyAsync(d_prevTparcel, d_Tparcel, sizeof(double) * N, hipMemcpyDeviceToDevice, stream)); delete h_prevZenv; delete h_prevPenv; delete h_prevTenv; h_prevZenv = h_Zenv; h_prevTenv = h_Tenv; h_prevPenv = h_Penv; curLevel.Value(curLevel.Value() - 1); } CUDA_CHECK(hipFree(h_prevZenv->values)); CUDA_CHECK(hipFree(h_prevTenv->values)); CUDA_CHECK(hipFree(h_prevPenv->values)); delete h_prevZenv; delete h_prevPenv; delete h_prevTenv; #if 0 // If the CAPE area is continued all the way to level 60 and beyond, we don't have an EL for that // (since integration is forcefully stopped) // In this case level 60 = EL for (size_t i = 0; i < CAPE.size(); i++) { if (CAPE[i] > 0 && ELT[i] == kFloatMissing) { TenvInfo->LocationIndex(i); PenvInfo->LocationIndex(i); ELT[i] = TenvInfo->Value(); ELP[i] = PenvInfo->Value(); } } #endif std::vector<double> CAPE(T.size(), 0); std::vector<double> CAPE1040(T.size(), 0); std::vector<double> CAPE3km(T.size(), 0); std::vector<double> ELT(T.size(), kFloatMissing); std::vector<double> ELP(T.size(), kFloatMissing); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipMemcpyAsync(&CAPE[0], d_CAPE, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&CAPE1040[0], d_CAPE1040, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&CAPE3km[0], d_CAPE3km, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&ELT[0], d_ELT, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&ELP[0], d_ELP, sizeof(double) * N, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipFree(d_Tparcel)); CUDA_CHECK(hipFree(d_prevTparcel)); CUDA_CHECK(hipFree(d_LFCT)); CUDA_CHECK(hipFree(d_LFCP)); CUDA_CHECK(hipFree(d_found)); CUDA_CHECK(hipStreamSynchronize(stream)); CUDA_CHECK(hipFree(d_CAPE)); CUDA_CHECK(hipFree(d_CAPE1040)); CUDA_CHECK(hipFree(d_CAPE3km)); CUDA_CHECK(hipFree(d_ELT)); CUDA_CHECK(hipFree(d_ELP)); myTargetInfo->Param(ELTParam); myTargetInfo->Data().Set(ELT); myTargetInfo->Param(ELPParam); myTargetInfo->Data().Set(ELP); myTargetInfo->Param(CAPEParam); myTargetInfo->Data().Set(CAPE); myTargetInfo->Param(CAPE1040Param); myTargetInfo->Data().Set(CAPE1040); myTargetInfo->Param(CAPE3kmParam); myTargetInfo->Data().Set(CAPE3km); }
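Every kernel launch in the functions above derives its grid size with the same ceiling-division idiom, gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1), which is the same value as (N + blockSize - 1) / blockSize; the bounds check inside each kernel then makes the surplus threads of the last block do nothing. The stand-alone sketch below is illustrative only — FillKernel and the sizes are not part of the himan sources — and just isolates that launch pattern.

// Minimal, self-contained sketch of the launch-size idiom used above (illustrative only,
// not part of the himan sources).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void FillKernel(double* d_arr, double val, size_t N)
{
    size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)  // surplus threads of the last block fall through here
    {
        d_arr[idx] = val;
    }
}

int main()
{
    const size_t N = 1000;  // deliberately not a multiple of blockSize
    const int blockSize = 256;
    // Ceiling division: 1000 / 256 = 3, remainder != 0, so gridSize = 4.
    // Same value as (N + blockSize - 1) / blockSize.
    const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1);
    printf("N=%zu blockSize=%d gridSize=%d\n", N, blockSize, gridSize);

    double* d_arr = 0;
    cudaMalloc((void**)&d_arr, N * sizeof(double));
    FillKernel<<<gridSize, blockSize>>>(d_arr, 1.0, N);
    cudaDeviceSynchronize();
    cudaFree(d_arr);
    return 0;
}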
68e1d3ff90a0ac364a36711890349904a524bf5c.cu
// System includes #include <iostream> #include <string> #include <cuda_runtime.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include "plugin_factory.h" #include "cape.cuh" #include "cuda_helper.h" #include "metutil.h" #include "util.h" #include <NFmiGribPacking.h> #include "forecast_time.h" #include "level.h" #define HIMAN_AUXILIARY_INCLUDE #include "cache.h" #include "fetcher.h" #include "hitool.h" #undef HIMAN_AUXILIARY_INCLUDE using namespace himan; using namespace himan::plugin; himan::level cape_cuda::itsBottomLevel; const unsigned char FCAPE = (1 << 2); const unsigned char FCAPE3km = (1 << 0); extern double Max(const std::vector<double>& vec); template <typename T> __global__ void InitializeArrayKernel(T* d_arr, T val, size_t N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (; idx < N; idx += stride) { d_arr[idx] = val; } } template <typename T> void InitializeArray(T* d_arr, T val, size_t N, cudaStream_t& stream) { const int blockSize = 128; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); InitializeArrayKernel<T><<<gridSize, blockSize, 0, stream>>>(d_arr, val, N); } template <typename T> __global__ void MultiplyWith(T* d_arr, T val, size_t N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (; idx < N; idx += stride) { d_arr[idx] = d_arr[idx] * val; } } template <typename T> void MultiplyWith(T* d_arr, T val, size_t N, cudaStream_t& stream) { const int blockSize = 128; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); MultiplyWith<T><<<gridSize, blockSize, 0, stream>>>(d_arr, val, N); } info_simple* PrepareInfo(std::shared_ptr<himan::info> fullInfo, cudaStream_t& stream) { auto h_info = fullInfo->ToSimple(); size_t N = h_info->size_x * h_info->size_y; assert(N > 0); // 1. Reserve memory at device for unpacked data double* d_arr = 0; CUDA_CHECK(cudaMalloc(reinterpret_cast<double**>(&d_arr), N * sizeof(double))); // 2. 
Unpack if needed, leave data to device and simultaneously copy it back to cpu (himan cache) auto tempGrid = fullInfo->Grid(); if (tempGrid->IsPackedData()) { assert(tempGrid->PackedData().ClassName() == "simple_packed" || tempGrid->PackedData().ClassName() == "jpeg_packed"); assert(N > 0); assert(tempGrid->Data().Size() == N); double* arr = const_cast<double*>(tempGrid->Data().ValuesAsPOD()); CUDA_CHECK(cudaHostRegister(reinterpret_cast<void*>(arr), sizeof(double) * N, 0)); assert(arr); tempGrid->PackedData().Unpack(d_arr, N, &stream); CUDA_CHECK(cudaMemcpyAsync(arr, d_arr, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); tempGrid->PackedData().Clear(); auto c = GET_PLUGIN(cache); CUDA_CHECK(cudaStreamSynchronize(stream)); c->Insert(*fullInfo); CUDA_CHECK(cudaHostUnregister(arr)); h_info->packed_values = 0; } else { CUDA_CHECK( cudaMemcpyAsync(d_arr, fullInfo->Data().ValuesAsPOD(), sizeof(double) * N, cudaMemcpyHostToDevice, stream)); } h_info->values = d_arr; return h_info; } std::shared_ptr<himan::info> Fetch(const std::shared_ptr<const plugin_configuration> conf, const himan::forecast_time& theTime, const himan::level& theLevel, const himan::param& theParam, const himan::forecast_type& theType) { try { auto f = GET_PLUGIN(fetcher); return f->Fetch(conf, theTime, theLevel, theParam, theType, true); } catch (HPExceptionType& e) { if (e != kFileDataNotFound) { throw std::runtime_error("cape_cuda::Fetch(): Unable to proceed"); } return std::shared_ptr<info>(); } } __global__ void CopyLFCIteratorValuesKernel(double* __restrict__ d_Titer, const double* __restrict__ d_Tparcel, double* __restrict__ d_Piter, info_simple d_Penv) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Penv.size_x * d_Penv.size_y) { if (d_Tparcel[idx] != kFloatMissing && d_Penv.values[idx] != kFloatMissing) { d_Titer[idx] = d_Tparcel[idx]; d_Piter[idx] = d_Penv.values[idx]; } } } __global__ void LiftLCLKernel(const double* __restrict__ d_P, const double* __restrict__ d_T, const double* __restrict__ d_PLCL, info_simple d_Ptarget, double* __restrict__ d_Tparcel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Ptarget.size_x * d_Ptarget.size_y) { assert(d_P[idx] > 10); assert(d_P[idx] < 1500 || d_P[idx] == kFloatMissing); assert(d_Ptarget.values[idx] > 10); assert(d_Ptarget.values[idx] < 1500 || d_Ptarget.values[idx] == kFloatMissing); assert(d_T[idx] > 100); assert(d_T[idx] < 350 || d_T[idx] == kFloatMissing); double T = metutil::LiftLCL_(d_P[idx] * 100, d_T[idx], d_PLCL[idx] * 100, d_Ptarget.values[idx] * 100); assert(T > 100); assert(T < 350 || T == kFloatMissing); d_Tparcel[idx] = T; } } __global__ void MoistLiftKernel(const double* __restrict__ d_T, const double* __restrict__ d_P, info_simple d_Ptarget, double* __restrict__ d_Tparcel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T); assert(d_P); if (idx < d_Ptarget.size_x * d_Ptarget.size_y) { assert(d_P[idx] > 10); assert(d_P[idx] < 1500 || d_P[idx] == kFloatMissing); assert(d_Ptarget.values[idx] > 10); assert(d_Ptarget.values[idx] < 1500 || d_Ptarget.values[idx] == kFloatMissing); assert(d_T[idx] > 100); assert(d_T[idx] < 350 || d_T[idx] == kFloatMissing); double T = metutil::MoistLiftA_(d_P[idx] * 100, d_T[idx], d_Ptarget.values[idx] * 100); assert(T > 100); assert(T < 350 || T == kFloatMissing); d_Tparcel[idx] = T; } } __global__ void CAPEKernel(info_simple d_Tenv, info_simple d_Penv, info_simple d_Zenv, info_simple d_prevTenv, info_simple d_prevPenv, info_simple d_prevZenv, const double* __restrict d_Tparcel, const 
double* __restrict d_prevTparcel, const double* __restrict__ d_LFCT, const double* __restrict__ d_LFCP, double* __restrict__ d_CAPE, double* __restrict__ d_CAPE1040, double* __restrict__ d_CAPE3km, double* __restrict__ d_ELT, double* __restrict__ d_ELP, unsigned char* __restrict__ d_found, int d_curLevel, int d_breakLevel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Tenv.size_x * d_Tenv.size_y && d_found[idx] != 4) { double Tenv = d_Tenv.values[idx]; assert(Tenv > 100.); double Penv = d_Penv.values[idx]; // hPa assert(Penv < 1200.); double Zenv = d_Zenv.values[idx]; // m double prevTenv = d_prevTenv.values[idx]; // K assert(prevTenv > 100.); double prevPenv = d_prevPenv.values[idx]; // hPa assert(prevPenv < 1200.); double prevZenv = d_prevZenv.values[idx]; // m double Tparcel = d_Tparcel[idx]; // K assert(Tparcel > 100. || Tparcel == kFloatMissing); double prevTparcel = d_prevTparcel[idx]; // K assert(prevTparcel > 100. || Tparcel == kFloatMissing); double LFCP = d_LFCP[idx]; // hPa assert(LFCP < 1200.); double LFCT = d_LFCT[idx]; // K assert(LFCT > 100.); if (Penv == kFloatMissing || Tenv == kFloatMissing || Zenv == kFloatMissing || prevZenv == kFloatMissing || Tparcel == kFloatMissing || Penv > LFCP) { // Missing data or current grid point is below LFC return; } if (prevTparcel == kFloatMissing && Tparcel != kFloatMissing) { // When rising above LFC, get accurate value of Tenv at that level so that even small amounts of CAPE // (and EL!) values can be determined. prevTenv = himan::numerical_functions::interpolation::Linear(LFCP, prevPenv, Penv, prevTenv, Tenv); prevZenv = himan::numerical_functions::interpolation::Linear(LFCP, prevPenv, Penv, prevZenv, Zenv); prevPenv = LFCP; // LFC pressure prevTparcel = LFCT; // LFC temperature // If LFC was found close to lower hybrid level, the linear interpolation and moist lift will result // to same values. In this case CAPE integration fails as there is no area formed between environment // and parcel temperature. The result for this is that LFC is found but EL is not found. To prevent // this, warm the parcel value just slightly so that a miniscule CAPE area is formed and EL is found. if (fabs(prevTparcel - prevTenv) < 0.0001) { prevTparcel += 0.0001; } } if (d_curLevel < d_breakLevel && (Tenv - Tparcel) > 25.) { // Temperature gap between environment and parcel too large --> abort search. // Only for values higher in the atmosphere, to avoid the effects of inversion d_found[idx] |= FCAPE; } else { if (prevZenv >= 3000. && Zenv >= 3000.) 
{ d_found[idx] |= FCAPE3km; } if ((d_found[idx] & FCAPE3km) == 0) { double C = CAPE::CalcCAPE3km(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv); d_CAPE3km[idx] += C; assert(d_CAPE3km[idx] < 3000.); // 3000J/kg, not 3000m assert(d_CAPE3km[idx] >= 0); } double C = CAPE::CalcCAPE1040(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv); d_CAPE1040[idx] += C; assert(d_CAPE1040[idx] < 5000.); assert(d_CAPE1040[idx] >= 0); double CAPE, ELT, ELP; CAPE::CalcCAPE(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv, CAPE, ELT, ELP); d_CAPE[idx] += CAPE; assert(CAPE >= 0.); assert(d_CAPE[idx] < 8000); if (ELT != kFloatMissing) { d_ELT[idx] = ELT; d_ELP[idx] = ELP; } } } } __global__ void CINKernel(info_simple d_Tenv, info_simple d_prevTenv, info_simple d_Penv, info_simple d_prevPenv, info_simple d_Zenv, info_simple d_prevZenv, const double* __restrict__ d_Tparcel, const double* __restrict__ d_prevTparcel, const double* __restrict__ d_PLCL, const double* __restrict__ d_PLFC, const double* __restrict__ d_Psource, double* __restrict__ d_cinh, unsigned char* __restrict__ d_found) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < d_Tenv.size_x * d_Tenv.size_y && d_found[idx] == 0) { double Tenv = d_Tenv.values[idx]; // K assert(Tenv >= 150.); const double prevTenv = d_prevTenv.values[idx]; double Penv = d_Penv.values[idx]; // hPa assert(Penv < 1200. || Penv == kFloatMissing); const double prevPenv = d_prevPenv.values[idx]; double Tparcel = d_Tparcel[idx]; // K assert(Tparcel >= 150. || Tparcel == kFloatMissing); const double prevTparcel = d_prevTparcel[idx]; double PLFC = d_PLFC[idx]; // hPa assert(PLFC < 1200. || PLFC == kFloatMissing); double PLCL = d_PLCL[idx]; // hPa assert(PLCL < 1200. || PLCL == kFloatMissing); double Zenv = d_Zenv.values[idx]; // m double prevZenv = d_prevZenv.values[idx]; // m // Make sure we have passed the starting level if (Penv <= d_Psource[idx]) { if (Penv <= PLFC) { // reached max height d_found[idx] = 1; // Integrate the final piece from previous level to LFC level if (prevTparcel == kFloatMissing || prevPenv == kFloatMissing || prevTenv == kFloatMissing) { Tparcel = kFloatMissing; // unable to proceed with CIN integration } else { // First get LFC height in meters Zenv = numerical_functions::interpolation::Linear(PLFC, prevPenv, Penv, prevZenv, Zenv); // LFC environment temperature value Tenv = numerical_functions::interpolation::Linear(PLFC, prevPenv, Penv, prevTenv, Tenv); // LFC T parcel value Tparcel = numerical_functions::interpolation::Linear(PLFC, prevPenv, Penv, prevTparcel, Tparcel); Penv = PLFC; assert(Zenv > prevZenv); } } if (Penv < PLCL && Tparcel != kFloatMissing) { // Above LCL, switch to virtual temperature Tparcel = metutil::VirtualTemperature_(Tparcel, Penv * 100); Tenv = metutil::VirtualTemperature_(Tenv, Penv * 100); } if (Tparcel != kFloatMissing) { d_cinh[idx] += CAPE::CalcCIN(Tenv, prevTenv, Tparcel, prevTparcel, Penv, prevPenv, Zenv, prevZenv); assert(d_cinh[idx] <= 0); } } } } __global__ void LFCKernel(info_simple d_T, info_simple d_P, info_simple d_prevT, info_simple d_prevP, double* __restrict__ d_Tparcel, const double* __restrict__ d_prevTparcel, const double* __restrict__ d_LCLT, const double* __restrict__ d_LCLP, double* __restrict__ d_LFCT, double* __restrict__ d_LFCP, unsigned char* __restrict__ d_found, int d_curLevel, int d_breakLevel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T.values); assert(d_P.values); if (idx < d_T.size_x * d_T.size_y && 
d_found[idx] == 0) { double Tparcel = d_Tparcel[idx]; double prevTparcel = d_prevTparcel[idx]; double Tenv = d_T.values[idx]; assert(Tenv < 350.); assert(Tenv > 100.); double prevTenv = d_prevT.values[idx]; assert(prevTenv < 350.); assert(prevTenv > 100.); double Penv = d_P.values[idx]; double prevPenv = d_prevP.values[idx]; double LCLP = d_LCLP[idx]; if (Tparcel != kFloatMissing && d_curLevel < d_breakLevel && (Tenv - Tparcel) > 30.) { // Temperature gap between environment and parcel too large --> abort search. // Only for values higher in the atmosphere, to avoid the effects of inversion d_found[idx] = 1; } if (Tparcel != kFloatMissing && Penv <= LCLP && Tparcel > Tenv && d_found[idx] == 0) { d_found[idx] = 1; if (prevTparcel == kFloatMissing) { prevTparcel = d_LCLT[idx]; // previous is LCL assert(d_LCLT[idx] != kFloatMissing); } if (fabs(prevTparcel - prevTenv) < 0.0001) { d_LFCT[idx] = Tparcel; d_LFCP[idx] = Penv; } else { auto intersection = CAPE::GetPointOfIntersection(point(Tenv, Penv), point(prevTenv, prevPenv), point(Tparcel, Penv), point(prevTparcel, prevPenv)); d_LFCT[idx] = intersection.X(); d_LFCP[idx] = intersection.Y(); if (d_LFCT[idx] == kFloatMissing) { // Intersection not found, use exact level value d_LFCT[idx] = Tenv; d_LFCP[idx] = Penv; } } assert(d_LFCT[idx] > 100); assert(d_LFCT[idx] < 350); } } } __global__ void ThetaEKernel(info_simple d_T, info_simple d_RH, info_simple d_P, info_simple d_prevT, info_simple d_prevRH, info_simple d_prevP, double* __restrict__ d_maxThetaE, double* __restrict__ d_Tresult, double* __restrict__ d_TDresult, double* __restrict__ d_Presult, unsigned char* __restrict__ d_found) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T.values); assert(d_RH.values); assert(d_P.values); if (idx < d_T.size_x * d_T.size_y && d_found[idx] == 0) { double T = d_T.values[idx]; double P = d_P.values[idx]; double RH = d_RH.values[idx]; if (P == kFloatMissing || T == kFloatMissing || RH == kFloatMissing) { d_found[idx] = 1; } else { if (P < 600.) 
{ // Cut search if reach level 600hPa // Linearly interpolate temperature and humidity values to 600hPa, to check // if highest theta e is found there T = numerical_functions::interpolation::Linear(600., P, d_prevP.values[idx], T, d_prevT.values[idx]); RH = numerical_functions::interpolation::Linear(600., P, d_prevP.values[idx], RH, d_prevRH.values[idx]); d_found[idx] = 1; // Make sure this is the last time we access this grid point P = 600.; } double TD = metutil::DewPointFromRH_(T, RH); double& refThetaE = d_maxThetaE[idx]; double ThetaE = metutil::smarttool::ThetaE_(T, RH, P * 100); if (ThetaE >= refThetaE) { refThetaE = ThetaE; d_Tresult[idx] = T; d_TDresult[idx] = TD; d_Presult[idx] = P; } } } } __global__ void MixingRatioKernel(const double* __restrict__ d_T, double* __restrict__ d_P, const double* __restrict__ d_RH, double* __restrict__ d_Tpot, double* __restrict__ d_MR, size_t N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T); assert(d_RH); assert(d_P); if (idx < N) { double T = d_T[idx]; double P = d_P[idx]; double RH = d_RH[idx]; assert((T > 150 && T < 350) || T == kFloatMissing); assert((P > 100 && P < 1500) || P == kFloatMissing); assert((RH >= 0 && RH < 102) || RH == kFloatMissing); if (T == kFloatMissing || P == kFloatMissing || RH == kFloatMissing) { d_P[idx] = kFloatMissing; } else { d_Tpot[idx] = metutil::Theta_(T, 100 * P); d_MR[idx] = metutil::smarttool::MixingRatio_(T, RH, 100 * P); d_P[idx] = P - 2.0; } } } __global__ void MixingRatioFinalizeKernel(double* __restrict__ d_T, double* __restrict__ d_TD, info_simple d_P, const double* __restrict__ d_Tpot, const double* __restrict__ d_MR, size_t N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; assert(d_T); assert(d_P.values); if (idx < N) { double P = d_P.values[idx]; double MR = d_MR[idx]; double Tpot = d_Tpot[idx]; assert((P > 100 && P < 1500) || P == kFloatMissing); if (Tpot != kFloatMissing && P != kFloatMissing) { d_T[idx] = Tpot * pow((P / 1000.), 0.2854); } double T = d_T[idx]; if (T != kFloatMissing && MR != kFloatMissing && P != kFloatMissing) { double Es = metutil::Es_(T); // Saturated water vapor pressure double E = metutil::E_(MR, 100 * P); double RH = E / Es * 100; d_TD[idx] = metutil::DewPointFromRH_(T, RH); } } } cape_source cape_cuda::GetHighestThetaEValuesGPU(const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo) { himan::level curLevel = itsBottomLevel; const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 
0 : 1); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); double* d_maxThetaE = 0; double* d_Tresult = 0; double* d_TDresult = 0; double* d_Presult = 0; unsigned char* d_found = 0; CUDA_CHECK(cudaMalloc((double**)&d_maxThetaE, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Tresult, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_TDresult, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Presult, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_found, sizeof(unsigned char) * N)); InitializeArray<double>(d_maxThetaE, -1, N, stream); InitializeArray<double>(d_Tresult, kFloatMissing, N, stream); InitializeArray<double>(d_TDresult, kFloatMissing, N, stream); InitializeArray<double>(d_Presult, kFloatMissing, N, stream); InitializeArray<unsigned char>(d_found, 0, N, stream); info_simple* h_prevT = 0; info_simple* h_prevP = 0; info_simple* h_prevRH = 0; while (true) { auto TInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto RHInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("RH-PRCNT"), myTargetInfo->ForecastType()); auto PInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); if (!TInfo || !RHInfo || !PInfo) { return std::make_tuple(std::vector<double>(), std::vector<double>(), std::vector<double>()); } auto h_T = PrepareInfo(TInfo, stream); auto h_P = PrepareInfo(PInfo, stream); auto h_RH = PrepareInfo(RHInfo, stream); assert(h_T->values); assert(h_RH->values); assert(h_P->values); bool release = true; if (!h_prevT) { // first time h_prevT = new info_simple(*h_T); h_prevP = new info_simple(*h_P); h_prevRH = new info_simple(*h_RH); release = false; } ThetaEKernel<<<gridSize, blockSize, 0, stream>>>(*h_T, *h_RH, *h_P, *h_prevT, *h_prevRH, *h_prevP, d_maxThetaE, d_Tresult, d_TDresult, d_Presult, d_found); std::vector<unsigned char> found(N, 0); CUDA_CHECK(cudaMemcpyAsync(&found[0], d_found, sizeof(unsigned char) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); if (release) { CUDA_CHECK(cudaFree(h_prevP->values)); CUDA_CHECK(cudaFree(h_prevRH->values)); CUDA_CHECK(cudaFree(h_prevT->values)); } delete h_prevP; delete h_prevT; delete h_prevRH; h_prevP = h_P; h_prevRH = h_RH; h_prevT = h_T; curLevel.Value(curLevel.Value() - 1); size_t foundCount = std::count(found.begin(), found.end(), 1); if (foundCount == found.size()) break; } CUDA_CHECK(cudaFree(h_prevP->values)); CUDA_CHECK(cudaFree(h_prevRH->values)); CUDA_CHECK(cudaFree(h_prevT->values)); delete h_prevP; delete h_prevT; delete h_prevRH; std::vector<double> Tthetae(myTargetInfo->Data().Size()); std::vector<double> TDthetae(myTargetInfo->Data().Size()); std::vector<double> Pthetae(myTargetInfo->Data().Size()); CUDA_CHECK(cudaMemcpyAsync(&Tthetae[0], d_Tresult, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&TDthetae[0], d_TDresult, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&Pthetae[0], d_Presult, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaFree(d_maxThetaE)); CUDA_CHECK(cudaFree(d_Tresult)); CUDA_CHECK(cudaFree(d_TDresult)); CUDA_CHECK(cudaFree(d_Presult)); CUDA_CHECK(cudaFree(d_found)); CUDA_CHECK(cudaStreamDestroy(stream)); return std::make_tuple(Tthetae, TDthetae, Pthetae); } cape_source cape_cuda::Get500mMixingRatioValuesGPU(std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo) { const size_t N = 
myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); level curLevel = itsBottomLevel; auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); modifier_mean tp, mr; tp.HeightInMeters(false); mr.HeightInMeters(false); auto f = GET_PLUGIN(fetcher); auto PInfo = f->Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType(), false); if (!PInfo) { return std::make_tuple(std::vector<double>(), std::vector<double>(), std::vector<double>()); } else { // Himan specialty: empty data grid size_t miss = 0; for (auto& val : VEC(PInfo)) { if (val == kFloatMissing) miss++; } if (PInfo->Data().MissingCount() == PInfo->Data().Size()) { return std::make_tuple(std::vector<double>(), std::vector<double>(), std::vector<double>()); } } auto PVec = VEC(PInfo); auto P500m = h->VerticalValue(param("P-HPA"), 500.); h->HeightUnit(kHPa); tp.LowerHeight(PVec); mr.LowerHeight(PVec); tp.UpperHeight(P500m); mr.UpperHeight(P500m); double* d_Tpot = 0; double* d_MR = 0; double* d_T = 0; double* d_RH = 0; double* d_P = 0; double* d_TD = 0; CUDA_CHECK(cudaMalloc((double**)&d_Tpot, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_MR, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_T, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_RH, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_P, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_TD, N * sizeof(double))); InitializeArray<double>(d_Tpot, kFloatMissing, N, stream); InitializeArray<double>(d_MR, kFloatMissing, N, stream); while (true) { auto TVec = h->VerticalValue(param("T-K"), PVec); auto RHVec = h->VerticalValue(param("RH-PRCNT"), PVec); CUDA_CHECK(cudaMemcpyAsync(d_T, &TVec[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_RH, &RHVec[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_P, &PVec[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); MixingRatioKernel<<<gridSize, blockSize, 0, stream>>>(d_T, d_P, d_RH, d_Tpot, d_MR, N); std::vector<double> Tpot(N, kFloatMissing); std::vector<double> MR(N, kFloatMissing); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaMemcpyAsync(&Tpot[0], d_Tpot, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&MR[0], d_MR, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); tp.Process(Tpot, PVec); mr.Process(MR, PVec); size_t foundCount = tp.HeightsCrossed(); assert(tp.HeightsCrossed() == mr.HeightsCrossed()); if (foundCount == N) { break; } CUDA_CHECK(cudaMemcpyAsync(&PVec[0], d_P, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); } CUDA_CHECK(cudaStreamSynchronize(stream)); // Calculate averages auto Tpot = tp.Result(); auto MR = mr.Result(); // Copy averages to GPU for final calculation CUDA_CHECK(cudaMemcpyAsync(d_Tpot, &Tpot[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_MR, &MR[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); auto Psurf = Fetch(conf, myTargetInfo->Time(), itsBottomLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto h_P = PrepareInfo(Psurf, stream); InitializeArray<double>(d_T, kFloatMissing, N, stream); InitializeArray<double>(d_TD, kFloatMissing, N, stream); std::vector<double> T(Tpot.size(), kFloatMissing); 
std::vector<double> TD(T.size(), kFloatMissing); MixingRatioFinalizeKernel<<<gridSize, blockSize, 0, stream>>>(d_T, d_TD, *h_P, d_Tpot, d_MR, N); CUDA_CHECK(cudaMemcpyAsync(&T[0], d_T, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&TD[0], d_TD, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaFree(d_Tpot)); CUDA_CHECK(cudaFree(d_MR)); CUDA_CHECK(cudaFree(d_RH)); CUDA_CHECK(cudaFree(d_P)); CUDA_CHECK(cudaFree(d_T)); CUDA_CHECK(cudaFree(d_TD)); CUDA_CHECK(cudaStreamDestroy(stream)); return std::make_tuple(T, TD, VEC(Psurf)); } std::pair<std::vector<double>, std::vector<double>> cape_cuda::GetLFCGPU( const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo, std::vector<double>& T, std::vector<double>& P, std::vector<double>& TenvLCL) { auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); h->HeightUnit(kHPa); const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); double* d_TenvLCL = 0; double* d_Titer = 0; double* d_Piter = 0; double* d_LCLP = 0; double* d_LCLT = 0; double* d_LFCT = 0; double* d_LFCP = 0; double* d_Tparcel = 0; double* d_prevTparcel = 0; unsigned char* d_found = 0; CUDA_CHECK(cudaMalloc((double**)&d_TenvLCL, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Piter, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Titer, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_LCLT, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_LCLP, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_LFCT, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_LFCP, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_found, sizeof(unsigned char) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Tparcel, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_prevTparcel, sizeof(double) * N)); CUDA_CHECK(cudaMemcpyAsync(d_TenvLCL, &TenvLCL[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_Titer, &T[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_Piter, &P[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_LCLT, d_Titer, sizeof(double) * N, cudaMemcpyDeviceToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_LCLP, d_Piter, sizeof(double) * N, cudaMemcpyDeviceToDevice, stream)); InitializeArray<double>(d_LFCT, kFloatMissing, N, stream); InitializeArray<double>(d_LFCP, kFloatMissing, N, stream); InitializeArray<double>(d_prevTparcel, kFloatMissing, N, stream); InitializeArray<unsigned char>(d_found, 0, N, stream); // For each grid point find the hybrid level that's below LCL and then pick the lowest level // among all grid points; most commonly it's the lowest hybrid level auto levels = h->LevelForHeight(myTargetInfo->Producer(), ::Max(P)); level curLevel = levels.first; auto prevPenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto prevTenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto h_prevTenv = PrepareInfo(prevTenvInfo, stream); auto h_prevPenv = PrepareInfo(prevPenvInfo, stream); assert(h_prevTenv->values); assert(h_prevPenv->values); curLevel.Value(curLevel.Value() - 1); std::vector<unsigned char> 
found(N, 0); std::vector<double> LFCT(N, kFloatMissing); std::vector<double> LFCP(N, kFloatMissing); for (size_t i = 0; i < N; i++) { if ((T[i] - TenvLCL[i]) > 0.001) { found[i] = 1; LFCT[i] = T[i]; LFCP[i] = P[i]; } } CUDA_CHECK(cudaMemcpyAsync(d_found, &found[0], sizeof(unsigned char) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_LFCT, &LFCT[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_LFCP, &LFCP[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); auto hPa450 = h->LevelForHeight(myTargetInfo->Producer(), 450.); auto hPa150 = h->LevelForHeight(myTargetInfo->Producer(), 150.); while (curLevel.Value() > hPa150.first.Value()) { // Get environment temperature and pressure values for this level auto TenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto PenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto h_Penv = PrepareInfo(PenvInfo, stream); auto h_Tenv = PrepareInfo(TenvInfo, stream); // Lift the particle from previous level to this level. In the first revolution // of this loop the starting level is LCL. If target level level is below current level // (ie. we would be lowering the particle) missing value is returned. MoistLiftKernel<<<gridSize, blockSize, 0, stream>>>(d_Titer, d_Piter, *h_Penv, d_Tparcel); LFCKernel<<<gridSize, blockSize, 0, stream>>>(*h_Tenv, *h_Penv, *h_prevTenv, *h_prevPenv, d_Tparcel, d_prevTparcel, d_LCLT, d_LCLP, d_LFCT, d_LFCP, d_found, curLevel.Value(), hPa450.first.Value()); CUDA_CHECK(cudaMemcpyAsync(&found[0], d_found, sizeof(unsigned char) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaFree(h_prevPenv->values)); CUDA_CHECK(cudaFree(h_prevTenv->values)); delete h_prevPenv; delete h_prevTenv; h_prevPenv = h_Penv; h_prevTenv = h_Tenv; CUDA_CHECK(cudaStreamSynchronize(stream)); if (static_cast<size_t>(std::count(found.begin(), found.end(), 1)) == found.size()) break; CUDA_CHECK(cudaMemcpyAsync(d_prevTparcel, d_Tparcel, sizeof(double) * N, cudaMemcpyDeviceToDevice, stream)); curLevel.Value(curLevel.Value() - 1); } CUDA_CHECK(cudaFree(h_prevPenv->values)); CUDA_CHECK(cudaFree(h_prevTenv->values)); delete h_prevPenv; delete h_prevTenv; CUDA_CHECK(cudaMemcpyAsync(&LFCT[0], d_LFCT, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&LFCP[0], d_LFCP, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaFree(d_LFCT)); CUDA_CHECK(cudaFree(d_LFCP)); CUDA_CHECK(cudaFree(d_LCLT)); CUDA_CHECK(cudaFree(d_LCLP)); CUDA_CHECK(cudaFree(d_Tparcel)); CUDA_CHECK(cudaFree(d_prevTparcel)); CUDA_CHECK(cudaFree(d_found)); CUDA_CHECK(cudaFree(d_Titer)); CUDA_CHECK(cudaFree(d_Piter)); CUDA_CHECK(cudaFree(d_TenvLCL)); CUDA_CHECK(cudaStreamDestroy(stream)); return std::make_pair(LFCT, LFCP); } void cape_cuda::GetCINGPU(const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo, const std::vector<double>& Tsource, const std::vector<double>& Psource, const std::vector<double>& TLCL, const std::vector<double>& PLCL, const std::vector<double>& PLFC, param CINParam) { const params PParams({param("PGR-PA"), param("P-PA")}); auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); h->HeightUnit(kHPa); forecast_time ftime = myTargetInfo->Time(); forecast_type ftype = 
myTargetInfo->ForecastType(); /* * Modus operandi: * * 1. Integrate from ground to LCL dry adiabatically * * This can be done always since LCL is known at all grid points * (that have source data values defined). * * 2. Integrate from LCL to LFC moist adiabatically * * Note! For some points integration will fail (no LFC found) * * We stop integrating at first time CAPE area is found! */ // Get LCL and LFC heights in meters auto ZLCL = h->VerticalValue(param("HL-M"), PLCL); auto ZLFC = h->VerticalValue(param("HL-M"), PLFC); level curLevel = itsBottomLevel; auto prevZenvInfo = Fetch(conf, ftime, curLevel, param("HL-M"), ftype); auto prevTenvInfo = Fetch(conf, ftime, curLevel, param("T-K"), ftype); auto prevPenvInfo = Fetch(conf, ftime, curLevel, param("P-HPA"), ftype); const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 0 : 1); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); auto h_prevZenv = PrepareInfo(prevZenvInfo, stream); auto h_prevTenv = PrepareInfo(prevTenvInfo, stream); auto h_prevPenv = PrepareInfo(prevPenvInfo, stream); double* d_Psource = 0; double* d_Tparcel = 0; double* d_prevTparcel = 0; double* d_Piter = 0; double* d_prevPiter = 0; double* d_Titer = 0; double* d_prevTiter = 0; double* d_PLCL = 0; double* d_PLFC = 0; double* d_cinh = 0; unsigned char* d_found = 0; CUDA_CHECK(cudaMalloc((double**)&d_Psource, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_Tparcel, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_prevTparcel, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_Piter, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_Titer, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_PLCL, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_PLFC, N * sizeof(double))); CUDA_CHECK(cudaMalloc((double**)&d_cinh, N * sizeof(double))); CUDA_CHECK(cudaMalloc((unsigned char**)&d_found, N * sizeof(unsigned char))); InitializeArray<double>(d_cinh, 0., N, stream); InitializeArray<double>(d_Tparcel, kFloatMissing, N, stream); CUDA_CHECK(cudaMemcpyAsync(d_prevTparcel, &Tsource[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_Psource, &Psource[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_Titer, &Tsource[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_Piter, &Psource[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_PLCL, &PLCL[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_PLFC, &PLFC[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); std::vector<unsigned char> found(N, 0); for (size_t i = 0; i < PLFC.size(); i++) { if (PLFC[i] == kFloatMissing) found[i] = true; } CUDA_CHECK(cudaMemcpyAsync(d_found, &found[0], sizeof(unsigned char) * N, cudaMemcpyHostToDevice, stream)); curLevel.Value(curLevel.Value() - 1); auto hPa100 = h->LevelForHeight(myTargetInfo->Producer(), 100.); while (curLevel.Value() > hPa100.first.Value()) { auto ZenvInfo = Fetch(conf, ftime, curLevel, param("HL-M"), ftype); auto TenvInfo = Fetch(conf, ftime, curLevel, param("T-K"), ftype); auto PenvInfo = Fetch(conf, ftime, curLevel, param("P-HPA"), ftype); auto h_Zenv = PrepareInfo(ZenvInfo, stream); auto h_Penv = PrepareInfo(PenvInfo, stream); auto h_Tenv = PrepareInfo(TenvInfo, stream); LiftLCLKernel<<<gridSize, blockSize, 0, stream>>>(d_Piter, d_Titer, d_PLCL, *h_Penv, d_Tparcel); 
CINKernel<<<gridSize, blockSize, 0, stream>>>(*h_Tenv, *h_prevTenv, *h_Penv, *h_prevPenv, *h_Zenv, *h_prevZenv, d_Tparcel, d_prevTparcel, d_PLCL, d_PLFC, d_Psource, d_cinh, d_found); CUDA_CHECK(cudaMemcpyAsync(&found[0], d_found, sizeof(unsigned char) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(d_prevTparcel, d_Tparcel, sizeof(double) * N, cudaMemcpyDeviceToDevice, stream)); CUDA_CHECK(cudaFree(h_prevPenv->values)); CUDA_CHECK(cudaFree(h_prevTenv->values)); CUDA_CHECK(cudaFree(h_prevZenv->values)); delete h_prevPenv; delete h_prevTenv; delete h_prevZenv; h_prevPenv = h_Penv; h_prevTenv = h_Tenv; h_prevZenv = h_Zenv; CUDA_CHECK(cudaStreamSynchronize(stream)); if (static_cast<size_t>(std::count(found.begin(), found.end(), 1)) == found.size()) break; // preserve starting position for those grid points that have value CopyLFCIteratorValuesKernel<<<gridSize, blockSize, 0, stream>>>(d_Titer, d_Tparcel, d_Piter, *h_Penv); curLevel.Value(curLevel.Value() - 1); } CUDA_CHECK(cudaFree(h_prevPenv->values)); CUDA_CHECK(cudaFree(h_prevTenv->values)); CUDA_CHECK(cudaFree(h_prevZenv->values)); delete h_prevPenv; delete h_prevTenv; delete h_prevZenv; std::vector<double> cinh(N, 0); CUDA_CHECK(cudaMemcpyAsync(&cinh[0], d_cinh, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaFree(d_cinh)); CUDA_CHECK(cudaFree(d_Psource)); CUDA_CHECK(cudaFree(d_Tparcel)); CUDA_CHECK(cudaFree(d_prevTparcel)); CUDA_CHECK(cudaFree(d_Piter)); CUDA_CHECK(cudaFree(d_prevPiter)); CUDA_CHECK(cudaFree(d_Titer)); CUDA_CHECK(cudaFree(d_prevTiter)); CUDA_CHECK(cudaFree(d_PLCL)); CUDA_CHECK(cudaFree(d_PLFC)); CUDA_CHECK(cudaFree(d_found)); CUDA_CHECK(cudaStreamDestroy(stream)); myTargetInfo->Param(CINParam); myTargetInfo->Data().Set(cinh); } void cape_cuda::GetCAPEGPU(const std::shared_ptr<const plugin_configuration> conf, std::shared_ptr<info> myTargetInfo, const std::vector<double>& T, const std::vector<double>& P, param ELTParam, param ELPParam, param CAPEParam, param CAPE1040Param, param CAPE3kmParam) { assert(T.size() == P.size()); auto h = GET_PLUGIN(hitool); h->Configuration(conf); h->Time(myTargetInfo->Time()); h->ForecastType(myTargetInfo->ForecastType()); h->HeightUnit(kHPa); // Found count determines if we have calculated all three CAPE variation for a single grid point std::vector<unsigned char> found(T.size(), 0); // No LFC --> No CAPE for (size_t i = 0; i < P.size(); i++) { if (P[i] == kFloatMissing) { found[i] |= FCAPE; } } const size_t N = myTargetInfo->Data().Size(); const int blockSize = 256; const int gridSize = N / blockSize + (N % blockSize == 0 ? 
0 : 1); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); double* d_CAPE = 0; double* d_CAPE1040 = 0; double* d_CAPE3km = 0; double* d_ELT = 0; double* d_ELP = 0; double* d_Titer = 0; double* d_Piter = 0; double* d_prevTparcel = 0; double* d_Tparcel = 0; double* d_LFCT = 0; double* d_LFCP = 0; unsigned char* d_found = 0; CUDA_CHECK(cudaMalloc((double**)&d_CAPE, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_CAPE1040, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_CAPE3km, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_ELP, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_ELT, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Piter, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Titer, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_Tparcel, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_prevTparcel, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_LFCT, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_LFCP, sizeof(double) * N)); CUDA_CHECK(cudaMalloc((double**)&d_found, sizeof(unsigned char) * N)); InitializeArray<unsigned char>(d_found, 0, N, stream); CUDA_CHECK(cudaMemcpyAsync(d_Titer, &T[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_prevTparcel, d_Titer, sizeof(double) * N, cudaMemcpyDeviceToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_Piter, &P[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_LFCT, &T[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_LFCP, &P[0], sizeof(double) * N, cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_found, &found[0], sizeof(unsigned char) * N, cudaMemcpyHostToDevice, stream)); InitializeArray<double>(d_CAPE, 0., N, stream); InitializeArray<double>(d_CAPE1040, 0., N, stream); InitializeArray<double>(d_CAPE3km, 0., N, stream); InitializeArray<double>(d_ELP, kFloatMissing, N, stream); InitializeArray<double>(d_ELT, kFloatMissing, N, stream); // For each grid point find the hybrid level that's below LFC and then pick the lowest level // among all grid points auto levels = h->LevelForHeight(myTargetInfo->Producer(), ::Max(P)); level curLevel = levels.first; auto prevZenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("HL-M"), myTargetInfo->ForecastType()); auto prevTenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto prevPenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto h_prevZenv = PrepareInfo(prevZenvInfo, stream); auto h_prevPenv = PrepareInfo(prevPenvInfo, stream); auto h_prevTenv = PrepareInfo(prevTenvInfo, stream); curLevel.Value(curLevel.Value()); auto hPa100 = h->LevelForHeight(myTargetInfo->Producer(), 100.); while (curLevel.Value() > hPa100.first.Value()) { auto PenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("P-HPA"), myTargetInfo->ForecastType()); auto TenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("T-K"), myTargetInfo->ForecastType()); auto ZenvInfo = Fetch(conf, myTargetInfo->Time(), curLevel, param("HL-M"), myTargetInfo->ForecastType()); auto h_Zenv = PrepareInfo(ZenvInfo, stream); auto h_Penv = PrepareInfo(PenvInfo, stream); auto h_Tenv = PrepareInfo(TenvInfo, stream); MoistLiftKernel<<<gridSize, blockSize, 0, stream>>>(d_Titer, d_Piter, *h_Penv, d_Tparcel); CAPEKernel<<<gridSize, blockSize, 0, stream>>>( *h_Tenv, *h_Penv, *h_Zenv, *h_prevTenv, *h_prevPenv, *h_prevZenv, 
d_Tparcel, d_prevTparcel, d_LFCT, d_LFCP, d_CAPE, d_CAPE1040, d_CAPE3km, d_ELT, d_ELP, d_found, curLevel.Value(), hPa100.first.Value()); CUDA_CHECK(cudaFree(h_prevZenv->values)); CUDA_CHECK(cudaFree(h_prevTenv->values)); CUDA_CHECK(cudaFree(h_prevPenv->values)); CUDA_CHECK(cudaMemcpyAsync(d_prevTparcel, d_Tparcel, sizeof(double) * N, cudaMemcpyDeviceToDevice, stream)); delete h_prevZenv; delete h_prevPenv; delete h_prevTenv; h_prevZenv = h_Zenv; h_prevTenv = h_Tenv; h_prevPenv = h_Penv; curLevel.Value(curLevel.Value() - 1); } CUDA_CHECK(cudaFree(h_prevZenv->values)); CUDA_CHECK(cudaFree(h_prevTenv->values)); CUDA_CHECK(cudaFree(h_prevPenv->values)); delete h_prevZenv; delete h_prevPenv; delete h_prevTenv; #if 0 // If the CAPE area is continued all the way to level 60 and beyond, we don't have an EL for that // (since integration is forcefully stopped) // In this case level 60 = EL for (size_t i = 0; i < CAPE.size(); i++) { if (CAPE[i] > 0 && ELT[i] == kFloatMissing) { TenvInfo->LocationIndex(i); PenvInfo->LocationIndex(i); ELT[i] = TenvInfo->Value(); ELP[i] = PenvInfo->Value(); } } #endif std::vector<double> CAPE(T.size(), 0); std::vector<double> CAPE1040(T.size(), 0); std::vector<double> CAPE3km(T.size(), 0); std::vector<double> ELT(T.size(), kFloatMissing); std::vector<double> ELP(T.size(), kFloatMissing); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaMemcpyAsync(&CAPE[0], d_CAPE, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&CAPE1040[0], d_CAPE1040, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&CAPE3km[0], d_CAPE3km, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&ELT[0], d_ELT, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&ELP[0], d_ELP, sizeof(double) * N, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaFree(d_Tparcel)); CUDA_CHECK(cudaFree(d_prevTparcel)); CUDA_CHECK(cudaFree(d_LFCT)); CUDA_CHECK(cudaFree(d_LFCP)); CUDA_CHECK(cudaFree(d_found)); CUDA_CHECK(cudaStreamSynchronize(stream)); CUDA_CHECK(cudaFree(d_CAPE)); CUDA_CHECK(cudaFree(d_CAPE1040)); CUDA_CHECK(cudaFree(d_CAPE3km)); CUDA_CHECK(cudaFree(d_ELT)); CUDA_CHECK(cudaFree(d_ELP)); myTargetInfo->Param(ELTParam); myTargetInfo->Data().Set(ELT); myTargetInfo->Param(ELPParam); myTargetInfo->Data().Set(ELP); myTargetInfo->Param(CAPEParam); myTargetInfo->Data().Set(CAPE); myTargetInfo->Param(CAPE1040Param); myTargetInfo->Data().Set(CAPE1040); myTargetInfo->Param(CAPE3kmParam); myTargetInfo->Data().Set(CAPE3km); }
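In both the HIP and CUDA versions of GetCAPEGPU above, per-grid-point completion is tracked with a small bit mask kept in d_found: FCAPE (1 << 2) marks a point whose integration has been aborted or that never had an LFC, FCAPE3km (1 << 0) marks that the 0-3 km portion is finished, and each launch re-reads the flags so finished points are skipped. The sketch below only illustrates that pattern — the kernel, thresholds and data are hypothetical, not taken from the himan sources.

// Illustrative sketch of the per-point completion bit mask used by GetCAPEGPU
// (flag names mirror the source; the kernel, data and thresholds are hypothetical).
#include <cstdio>
#include <cuda_runtime.h>

const unsigned char FCAPE = (1 << 2);     // whole integration finished for this point
const unsigned char FCAPE3km = (1 << 0);  // low-level (0-3 km) part finished

__global__ void FlagKernel(const double* d_z, unsigned char* d_found, size_t N)
{
    size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N || (d_found[idx] & FCAPE)) return;   // skip points that are already done

    if (d_z[idx] >= 3000.) d_found[idx] |= FCAPE3km;  // above 3 km: stop the 3 km accumulation
    if (d_z[idx] >= 10000.) d_found[idx] |= FCAPE;    // above an arbitrary cap: stop entirely
}

int main()
{
    const size_t N = 4;
    double h_z[N] = {500., 3500., 12000., 1500.};
    unsigned char h_found[N] = {0};

    double* d_z = 0;
    unsigned char* d_found = 0;
    cudaMalloc((void**)&d_z, N * sizeof(double));
    cudaMalloc((void**)&d_found, N * sizeof(unsigned char));
    cudaMemcpy(d_z, h_z, N * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_found, h_found, N * sizeof(unsigned char), cudaMemcpyHostToDevice);

    FlagKernel<<<1, 256>>>(d_z, d_found, N);
    cudaMemcpy(h_found, d_found, N * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    for (size_t i = 0; i < N; i++) printf("%zu: found=%d\n", i, h_found[i]);

    cudaFree(d_z);
    cudaFree(d_found);
    return 0;
}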
541951397a6687013e6b985b8cbd4e6a49878d78.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <cmath> #define HEIGHT 333 #define WIDTH 1366 #define PI 3.14159 #define TRUE 1 #define FALSE 0 #define MAX_BRIGHTNESS 255 typedef int pixel_t; __device__ int global_max = 0; __device__ float alpha = 0; __host__ void gaussian_filter(float *, const float); __global__ void convolution(pixel_t *, pixel_t *, float *, const int); __global__ void convolution1(pixel_t *cin, pixel_t *cout, pixel_t *mask, const int mSize); __device__ void Travers(int, int, pixel_t *, pixel_t *, pixel_t *); __global__ void pixel_hypot(pixel_t *, pixel_t *, pixel_t *, pixel_t *); __global__ void findNonMax(pixel_t *, pixel_t *, pixel_t *, const int, const int); __global__ void hysterisisThreshold(pixel_t *, pixel_t *, pixel_t *); __global__ void kernel2(pixel_t*, pixel_t*, int *); __global__ void kernel3(pixel_t*, pixel_t*); __global__ void houghlines(pixel_t*, pixel_t*, pixel_t*, int *, pixel_t*); __global__ void dilation(pixel_t*, pixel_t*); __global__ void erosion(pixel_t*, pixel_t*); /***************************************************************************/ __global__ void houghlines(pixel_t* d_img_cannyout, pixel_t* d_hough_out_img, pixel_t* d_img_houghout, int *d_max, pixel_t* d_out_img) { float theta; int nrho = (int)sqrt((float)(HEIGHT*HEIGHT) + (float)(WIDTH*WIDTH)) + 1; int ntheta = 271; // -90 ~ 180 float rho = 0, rad = PI / 180; __shared__ int max; max = 0; *d_max = 0; int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i<HEIGHT && j<WIDTH) { if (d_img_cannyout[i*WIDTH + j] == 1) { for (theta = -90; theta <= 180; theta++) { rho = i*cos(theta*rad) + j*sin(theta*rad); if (rho>0 && rho<nrho) { atomicAdd(&d_img_houghout[(int)rho * ntheta + (int)(theta + 90)], 1); //TODO: SHOULD INITIALIZE TO ZERO if (max < (int)d_img_houghout[(int)rho* ntheta + (int)(theta + 90)]) { max = (int)d_img_houghout[(int)rho* ntheta + (int)(theta + 90)]; } } } } } atomicMax(&global_max, (int)max); // if time permits change this to global_max *d_max = global_max; } /****************************************************************/ __global__ void kernel2(pixel_t* d_img_houghout, pixel_t* d_hough_out_img, int *d_max) { int nrho = (int)sqrt((float)(HEIGHT*HEIGHT) + (float)(WIDTH*WIDTH)) + 1; int ntheta = 271; // -90 ~ 180 int k; k = *d_max; alpha = (float)255 / k; //printf("The alpha is:%f and k is:%d\n",alpha,k); int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i<nrho && j<ntheta) { d_hough_out_img[i*ntheta + j] = (pixel_t)(alpha*d_img_houghout[i*ntheta + j]); } } /********************************************************************/ __global__ void kernel3(pixel_t* d_hough_out_img, pixel_t* d_out_img) { float theta; int nrho = (int)sqrt((float)(HEIGHT*HEIGHT) + (float)(WIDTH*WIDTH)) + 1; int ntheta = 271; // -90 ~ 180 float rho = 0, rad = PI / 180; int thresh = 70; int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i<HEIGHT && j<WIDTH) { for (theta = -90; theta<180; theta++) { rho = i*cos(theta*rad) + j*sin(theta*rad); if (rho>0 && rho<nrho && d_hough_out_img[(int)rho* ntheta + (int)(theta + 90)]>thresh) { d_out_img[i*WIDTH + j] = 255; } } } } /****************************************************************/ __global__ void hysterisisThreshold(pixel_t 
*d_edgepoints, pixel_t *d_edges, pixel_t *d_visitedmap) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (d_edgepoints[row* WIDTH + col] == 1) { d_edges[row* WIDTH + col] = 1; Travers(row, col, d_edgepoints, d_edges, d_visitedmap); d_visitedmap[row* WIDTH + col] = 1; } } /**********************************************************************/ __device__ void Travers(int row, int col, pixel_t *d_edgepoints, pixel_t * d_edges, pixel_t *d_visitedmap) { if (d_visitedmap[row * WIDTH + col] == 1) return; //1 if (d_edgepoints[(row + 1) * WIDTH + col] == 2) { d_edges[(row + 1) * WIDTH + col] = 1; d_visitedmap[(row + 1) * WIDTH + col] = 1; Travers(row + 1, col, d_edgepoints, d_edges, d_visitedmap); return; } //2 if (d_edgepoints[(row + 1) * WIDTH + col - 1] == 2) { d_edges[(row + 1) * WIDTH + col - 1] = 1; d_visitedmap[(row + 1) * WIDTH + col - 1] = 1; Travers(row + 1, col - 1, d_edgepoints, d_edges, d_visitedmap); return; } //3 if (d_edgepoints[(row)* WIDTH + col - 1] == 2) { d_edges[(row)* WIDTH + col - 1] = 1; d_visitedmap[(row)* WIDTH + col - 1] = 1; Travers(row, col - 1, d_edgepoints, d_edges, d_visitedmap); return; } //4 if (d_edgepoints[(row - 1) * WIDTH + col - 1] == 2) { d_edges[(row - 1) * WIDTH + col - 1] = 1; d_visitedmap[(row - 1) * WIDTH + col - 1] = 1; Travers(row - 1, col - 1, d_edgepoints, d_edges, d_visitedmap); return; } //5 if (d_edgepoints[(row - 1) * WIDTH + col] == 2) { d_edges[(row - 1) * WIDTH + col] = 1; d_visitedmap[(row - 1) * WIDTH + col] = 1; Travers(row - 1, col, d_edgepoints, d_edges, d_visitedmap); return; } //6 if (d_edgepoints[(row - 1) * WIDTH + col + 1] == 2) { d_edges[(row - 1) * WIDTH + col + 1] = 1; d_visitedmap[(row - 1) * WIDTH + col + 1] = 1; Travers(row - 1, col + 1, d_edgepoints, d_edges, d_visitedmap); return; } //7 if (d_edgepoints[(row)* WIDTH + col + 1] == 2) { d_edges[(row)* WIDTH + col + 1] = 1; d_visitedmap[(row)* WIDTH + col + 1] = 1; Travers(row, col + 1, d_edgepoints, d_edges, d_visitedmap); return; } //8 if (d_edgepoints[(row + 1) * WIDTH + col + 1] == 2) { d_edges[(row + 1) * WIDTH + col + 1] = 1; d_visitedmap[(row + 1) * WIDTH + col + 1] = 1; Travers(row + 1, col + 1, d_edgepoints, d_edges, d_visitedmap); return; } return; } /*********************************************************/ __global__ void findNonMax(pixel_t *d_nonMax, pixel_t *d_thisAngle, pixel_t *d_edgepoints, const int tmin, const int tmax) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; pixel_t thisAngle = d_thisAngle[row* WIDTH + col]; pixel_t nonMax = d_nonMax[row* WIDTH + col]; if (row >0 && col > 0 && row<HEIGHT && col<WIDTH) { //Horizontal Edge if (((-22.5 < thisAngle) && (thisAngle <= 22.5)) || ((157.5 < thisAngle) && (thisAngle <= -157.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[row* WIDTH + col + 1]) || (d_nonMax[row* WIDTH + col] < d_nonMax[row* WIDTH + col - 1])) nonMax = 0; } //Vertical Edge if (((-112.5 < thisAngle) && (thisAngle <= -67.5)) || ((67.5 < thisAngle) && (thisAngle <= 112.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[(row + 1)* WIDTH + col]) || (d_nonMax[row* WIDTH + col] < d_nonMax[(row - 1)* WIDTH + col])) nonMax = 0; } //+45 Degree Edge if (((-67.5 < thisAngle) && (thisAngle <= -22.5)) || ((112.5 < thisAngle) && (thisAngle <= 157.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[(row + 1)* WIDTH + col - 1]) || (d_nonMax[row* WIDTH + col] < d_nonMax[(row - 1)* WIDTH + col + 1])) nonMax = 0; } //-45 Degree Edge if (((-157.5 < thisAngle) 
&& (thisAngle <= -112.5)) || ((67.5 < thisAngle) && (thisAngle <= 22.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[(row + 1)* WIDTH + (col + 1)]) || (d_nonMax[row* WIDTH + col] < d_nonMax[(row - 1)* WIDTH + col - 1])) nonMax = 0; } d_nonMax[row* WIDTH + col] = nonMax; if (d_nonMax[row* WIDTH + col] > tmax) d_edgepoints[row* WIDTH + col] = 1; else if ((d_nonMax[row* WIDTH + col] < tmax) && (d_nonMax[row* WIDTH + col] > tmin)) d_edgepoints[row* WIDTH + col] = 2; else if (d_nonMax[row* WIDTH + col] < tmin) d_edgepoints[row* WIDTH + col] = 0; } } /*******************************************************/ __global__ void pixel_hypot(pixel_t *d_mGx, pixel_t *d_mGy, pixel_t *d_gradient, pixel_t *d_thisAngle) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; long int gradient = 0; if (row >0 && col > 0 && row<HEIGHT && col<WIDTH) { pixel_t pixel_gx, pixel_gy; pixel_gx = d_mGx[row* WIDTH + col]; pixel_gy = d_mGy[row* WIDTH + col]; gradient = (pixel_t)sqrt((float)((pixel_gx* pixel_gx) + (pixel_gy * pixel_gy))); if (gradient >= 255) d_gradient[row* WIDTH + col] = 255; else d_gradient[row* WIDTH + col] = (pixel_t)gradient; if (pixel_gx != 0) d_thisAngle[row* WIDTH + col] = (atan2f((float)pixel_gy, (float)pixel_gx) * 180) / PI; else if (pixel_gy != 0) d_thisAngle[row* WIDTH + col] = 90; } } /*******************************************************/ __host__ void gaussian_filter(float *gout, const float sigma) { int n = 7; //2 * (int)(2 * sigma) + 3; // size of gaussian mask matrix const float mean = (float)floor(n / 2.0); int i, j; /* The gMask size is calculated from the value of sigma(n= 2 * (int)(2 * sigma) + 3 ) However due to errors in unallocatiion of host memoery in gaussian_filter kernel , we fixed the value of sigma, hence n*/ for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { gout[i*n + j] = exp(-0.5 * (pow((double)(i - mean) / sigma, 2.0) + pow((double)(j - mean) / sigma, 2.0))) / (2 * PI * sigma * sigma); } } } /*******************************************************/ __global__ void convolution(pixel_t *cin, pixel_t *cout, float *mask, const int mSize) { int k = mSize / 2; int i, j; float mpix = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT) && (col >= k && col < WIDTH)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { mpix += cin[(row - i) * WIDTH + col - j] * (float)mask[(i + k) * mSize + (j + k)]; } } if (mpix < 0.0) mpix = 0.0; else if (mpix >255.0) mpix = 255.0; cout[row * WIDTH + col] = (pixel_t)mpix; } } /*****************************************************************/ __global__ void convolution1(pixel_t *cin, pixel_t *cout, pixel_t *mask, const int mSize) { int k = mSize / 2; int i, j; float mpix = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT) && (col >= k && col < WIDTH)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { mpix += cin[(row - i) * WIDTH + col - j] * (float)mask[(i + k) * mSize + (j + k)]; } } if (mpix < 0.0) mpix = 0.0; else if (mpix >255.0) mpix = 255.0; cout[row * WIDTH + col] = (pixel_t)mpix; } } ////////////////////////////////////////////////////////////////// __global__ void dilation(pixel_t *diin, pixel_t *diout) { int mSize = 5; int k = mSize / 2; int i, j; int count = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT-k) && (col >= k && col 
< WIDTH-k)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { if (diin[(row - i) * WIDTH + col - j] == 1) count += 1; } } if (count >= 1) diout[row * WIDTH + col] = 1; else diout[row * WIDTH + col] = 0; } } ////////////////////////////////////////////////////////////////// __global__ void erosion(pixel_t *erin, pixel_t *erout) { int mSize = 11; int k = mSize / 2; int i, j; int count = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT-k) && (col >= k && col < WIDTH-k)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { if (erin[(row - i) * WIDTH + col - j] == 1) count += 1; } } if (count == mSize*mSize) erout[row * WIDTH + col] = 1; else erout[row * WIDTH + col] = 0; } } /**************************************************************/ int main(int argc, char *argv[]) { pixel_t *h_img_ip, *h_img_op; // input and output images in the host pixel_t *d_img_ip, *d_img_op; // input and output images in the device pixel_t *d_gradient, *h_gradient, *d_thisAngle, *h_thisAngle; int i, j; int dev_count; const float sigma = 1.4; float *h_gMask, *d_gMask; pixel_t *d_gGx, *h_gGx, *d_gGy, *h_gGy; pixel_t *d_mGx, *d_mGy; pixel_t *h_nonMax, *d_nonMax; pixel_t *h_edgepoints, *d_edgepoints; pixel_t *h_edges, *d_edges; pixel_t *d_visitedmap; int n = 7; //2 * (int)(2 * sigma) + 3; // size of gaussian mask matrix pixel_t h_mGx[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; pixel_t h_mGy[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; h_img_ip = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_img_op = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gGx = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gGy = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_thisAngle = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gradient = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_nonMax = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_edgepoints = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_edges = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gMask = (float *)malloc(n * n * sizeof(float)); FILE *fp1, *fp2, *fp3, *fp4, *fp5, *fp6; hipGetDeviceCount(&dev_count); printf("Dev count: %d\n", dev_count); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, 0); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); hipMalloc(&d_img_ip, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_img_op, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_gGx, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_gGy, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_gradient, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_thisAngle, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_nonMax, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_edges, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_visitedmap, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_mGx, 3 * 3 * sizeof(pixel_t)); hipMalloc(&d_mGy, 3 * 3 * sizeof(pixel_t)); hipMalloc(&d_gMask, n * n * sizeof(float)); fp1 = 
fopen("car1.txt", "r"); fp2 = fopen("cannyoutput.txt", "w"); for (i = 0; i < HEIGHT; i++) { for (j = 0; j < WIDTH; j++) { fscanf(fp1, "%d ", &h_img_ip[i*WIDTH + j]); } } printf("before gaussian filter\n"); gaussian_filter(h_gMask, sigma); hipMemcpy(d_gMask, h_gMask, n * n* sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_mGx, h_mGx, 3 * 3 * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemcpy(d_mGy, h_mGy, 3 * 3 * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemcpy(d_img_ip, h_img_ip, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemset(d_img_op, 0, HEIGHT * WIDTH * sizeof(pixel_t)); const dim3 block(16, 16, 1); const dim3 grid((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y); printf("before canny filter\n"); convolution << <grid, block >> >(d_img_ip, d_img_op, d_gMask, n); hipDeviceSynchronize(); hipMemcpy(h_img_op, d_img_op, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_img_op, h_img_op, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); printf("after 1st convolution\n"); convolution1 << <grid, block >> >(d_img_op, d_gGx, d_mGx, 3); hipDeviceSynchronize(); hipMemcpy(h_gGx, d_gGx, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_gGx, h_gGx, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); convolution1 << <grid, block >> >(d_img_op, d_gGy, d_mGy, 3); hipDeviceSynchronize(); hipMemcpy(h_gGy, d_gGy, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_gGy, h_gGy, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemset(d_gradient, 0, HEIGHT * WIDTH * sizeof(pixel_t)); pixel_hypot << <grid, block >> >(d_gGx, d_gGy, d_gradient, d_thisAngle); hipDeviceSynchronize(); hipMemcpy(h_gradient, d_gradient, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_gradient, h_gradient, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemcpy(h_thisAngle, d_thisAngle, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_thisAngle, h_thisAngle, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemcpy(d_nonMax, h_gradient, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); // for non maximal supression hipMemset(d_edgepoints, 0, HEIGHT * WIDTH * sizeof(pixel_t)); findNonMax << <grid, block >> >(d_nonMax, d_thisAngle, d_edgepoints, 15, 20); hipDeviceSynchronize(); hipMemcpy(h_nonMax, d_nonMax, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(h_edgepoints, d_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_edgepoints, h_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); hipMemcpy(h_edgepoints, d_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemset(d_edges, 0, HEIGHT * WIDTH * sizeof(pixel_t)); hipMemset(d_visitedmap, 0, HEIGHT * WIDTH * sizeof(pixel_t)); hysterisisThreshold << <grid, block >> >(d_edgepoints, d_edges, d_visitedmap); hipDeviceSynchronize(); hipMemcpy(h_edges, d_edges, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyDeviceToHost); printf(" kernels executed\n"); for (i = 0; i < HEIGHT; i++) { for (j = 0; j < WIDTH; j++) { fprintf(fp2, "%d\t", h_edges[i*WIDTH + j]); } fprintf(fp2, "\n"); } hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipFree(d_img_ip); hipFree(h_img_ip); hipFree(d_img_op); hipFree(h_img_op); hipFree(d_gradient); hipFree(h_gradient); hipFree(h_thisAngle); hipFree(d_thisAngle); hipFree(d_mGx); hipFree(d_mGy); hipFree(h_nonMax); hipFree(d_nonMax); 
hipFree(h_edgepoints); hipFree(d_edgepoints); hipFree(d_visitedmap); fclose(fp1); fclose(fp2); /*******************************************************************/ pixel_t *h_hough_out_img, *d_hough_out_img; pixel_t *h_img_houghout, *d_img_houghout; //just for h_hough_out_img output without alpha multiplication pixel_t *h_out_img, *d_out_img; //display image int *d_max, *h_max; int nrho, ntheta; nrho = (int)sqrt(HEIGHT*HEIGHT + WIDTH*WIDTH) + 1; ntheta = 271; // -90 ~ 18 h_out_img = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_img_houghout = (pixel_t *)malloc(nrho*ntheta*sizeof(pixel_t)); h_hough_out_img = (pixel_t *)malloc(nrho*ntheta*sizeof(pixel_t)); h_max = (int *)malloc(sizeof(int)); fp3 = fopen("houghoutput.txt", "w"); fp4 = fopen("output.txt", "w"); memset(h_hough_out_img, 0, ntheta * nrho * sizeof(pixel_t)); memset(h_out_img, 0, HEIGHT*WIDTH*sizeof(pixel_t)); //hipMalloc(&d_image_cannyout, HEIGHT * WIDTH * sizeof(int)); hipMalloc(&d_img_houghout, ntheta * nrho * sizeof(pixel_t)); hipMalloc(&d_hough_out_img, ntheta * nrho * sizeof(pixel_t)); hipMalloc(&d_max, sizeof(int)); hipMalloc(&d_out_img, HEIGHT * WIDTH * sizeof(pixel_t)); hipMemset(d_img_houghout, 0, ntheta * nrho * sizeof(pixel_t)); hipMemset(d_hough_out_img, 0, ntheta * nrho * sizeof(pixel_t)); hipMemset(d_out_img, 0, HEIGHT * WIDTH * sizeof(pixel_t)); hipMemcpy(d_edges, h_edges, HEIGHT * WIDTH * sizeof(pixel_t), hipMemcpyHostToDevice); dim3 grid1((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y); houghlines << <grid1, block >> >(d_edges, d_hough_out_img, d_img_houghout, d_max, d_out_img); // check the number of threds and blocks hipDeviceSynchronize(); hipMemcpy(h_img_houghout, d_img_houghout, nrho * ntheta* sizeof(pixel_t), hipMemcpyDeviceToHost); hipMemcpy(d_img_houghout, h_img_houghout, nrho * ntheta* sizeof(pixel_t), hipMemcpyHostToDevice); hipMemcpy(h_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(d_max, h_max, sizeof(int), hipMemcpyHostToDevice); dim3 grid2(sqrt((float)(HEIGHT*HEIGHT + WIDTH*WIDTH)) + 1, 271); printf("BEFORE KERNEL2\n"); kernel2 << <grid2, block >> >(d_img_houghout, d_hough_out_img, d_max); printf("AFTER KERNEL2\n"); hipDeviceSynchronize(); dim3 grid3((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y); printf("BEFORE KERNEL3\n"); kernel3 << <grid3, block >> >(d_hough_out_img, d_out_img); printf("AFTER KERNEL3\n"); hipDeviceSynchronize(); hipMemcpy(h_out_img, d_out_img, HEIGHT*WIDTH*sizeof(pixel_t), hipMemcpyDeviceToHost); printf("THe max value is:%d\n", *h_max); hipMemcpy(h_hough_out_img, d_hough_out_img, nrho * ntheta* sizeof(pixel_t), hipMemcpyDeviceToHost); for (i = 0; i<nrho; i++) { for (j = 0; j<ntheta; j++) { fprintf(fp3, "%d\t", h_hough_out_img[i*ntheta + j]); } fprintf(fp3, "\n"); } for (i = 0; i<HEIGHT; i++) { for (j = 0; j<WIDTH; j++) { fprintf(fp4, "%d\t", h_out_img[i*WIDTH + j]); } fprintf(fp4, "\n"); } printf("$$$nrho value is:%d$$$\n", nrho); hipFree(d_img_houghout); hipFree(d_max); fclose(fp3); fclose(fp4); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// pixel_t *h_dilout, *d_dilin, *d_dilout; h_dilout = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); fp5 = fopen("dilout.txt", "w"); memset(h_dilout, 0, HEIGHT*WIDTH*sizeof(pixel_t)); hipMalloc(&d_dilin, HEIGHT * WIDTH * sizeof(pixel_t)); hipMalloc(&d_dilout, HEIGHT * WIDTH * sizeof(pixel_t)); hipMemset(d_dilout, 0, HEIGHT * WIDTH * sizeof(pixel_t)); hipMemcpy(d_dilin, h_edges, sizeof(pixel_t), 
hipMemcpyHostToDevice); hipLaunchKernelGGL(( dilation) , dim3(grid), dim3(block) , 0, 0, d_edges, d_dilout); hipDeviceSynchronize(); printf("AFTER DILATION\n"); hipMemcpy(h_dilout, d_dilout, HEIGHT*WIDTH*sizeof(pixel_t), hipMemcpyDeviceToHost); for (i = 0; i<HEIGHT; i++) { for (j = 0; j<WIDTH; j++) { fprintf(fp5, "%d\t", h_dilout[i*WIDTH + j]); } fprintf(fp5, "\n"); } /////////////////////////////////////////////////////////////////////// pixel_t *h_morout, *d_morout; h_morout = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); fp6 = fopen("morout.txt", "w"); memset(h_morout, 0, HEIGHT*WIDTH*sizeof(pixel_t)); hipMalloc(&d_morout, HEIGHT * WIDTH * sizeof(pixel_t)); hipMemset(d_morout, 0, HEIGHT * WIDTH * sizeof(pixel_t)); hipLaunchKernelGGL(( dilation) , dim3(grid), dim3(block) , 0, 0, d_dilout, d_morout); hipDeviceSynchronize(); printf("AFTER DILATION\n"); hipMemcpy(h_morout, d_morout, HEIGHT*WIDTH*sizeof(pixel_t), hipMemcpyDeviceToHost); for (i = 0; i<HEIGHT; i++) { for (j = 0; j<WIDTH; j++) { fprintf(fp6, "%d\t", h_morout[i*WIDTH + j]); } fprintf(fp6, "\n"); } return 0; }
541951397a6687013e6b985b8cbd4e6a49878d78.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <device_functions.h> #include <cmath> #define HEIGHT 333 #define WIDTH 1366 #define PI 3.14159 #define TRUE 1 #define FALSE 0 #define MAX_BRIGHTNESS 255 typedef int pixel_t; __device__ int global_max = 0; __device__ float alpha = 0; __host__ void gaussian_filter(float *, const float); __global__ void convolution(pixel_t *, pixel_t *, float *, const int); __global__ void convolution1(pixel_t *cin, pixel_t *cout, pixel_t *mask, const int mSize); __device__ void Travers(int, int, pixel_t *, pixel_t *, pixel_t *); __global__ void pixel_hypot(pixel_t *, pixel_t *, pixel_t *, pixel_t *); __global__ void findNonMax(pixel_t *, pixel_t *, pixel_t *, const int, const int); __global__ void hysterisisThreshold(pixel_t *, pixel_t *, pixel_t *); __global__ void kernel2(pixel_t*, pixel_t*, int *); __global__ void kernel3(pixel_t*, pixel_t*); __global__ void houghlines(pixel_t*, pixel_t*, pixel_t*, int *, pixel_t*); __global__ void dilation(pixel_t*, pixel_t*); __global__ void erosion(pixel_t*, pixel_t*); /***************************************************************************/ __global__ void houghlines(pixel_t* d_img_cannyout, pixel_t* d_hough_out_img, pixel_t* d_img_houghout, int *d_max, pixel_t* d_out_img) { float theta; int nrho = (int)sqrt((float)(HEIGHT*HEIGHT) + (float)(WIDTH*WIDTH)) + 1; int ntheta = 271; // -90 ~ 180 float rho = 0, rad = PI / 180; __shared__ int max; max = 0; *d_max = 0; int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i<HEIGHT && j<WIDTH) { if (d_img_cannyout[i*WIDTH + j] == 1) { for (theta = -90; theta <= 180; theta++) { rho = i*cos(theta*rad) + j*sin(theta*rad); if (rho>0 && rho<nrho) { atomicAdd(&d_img_houghout[(int)rho * ntheta + (int)(theta + 90)], 1); //TODO: SHOULD INITIALIZE TO ZERO if (max < (int)d_img_houghout[(int)rho* ntheta + (int)(theta + 90)]) { max = (int)d_img_houghout[(int)rho* ntheta + (int)(theta + 90)]; } } } } } atomicMax(&global_max, (int)max); // if time permits change this to global_max *d_max = global_max; } /****************************************************************/ __global__ void kernel2(pixel_t* d_img_houghout, pixel_t* d_hough_out_img, int *d_max) { int nrho = (int)sqrt((float)(HEIGHT*HEIGHT) + (float)(WIDTH*WIDTH)) + 1; int ntheta = 271; // -90 ~ 180 int k; k = *d_max; alpha = (float)255 / k; //printf("The alpha is:%f and k is:%d\n",alpha,k); int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i<nrho && j<ntheta) { d_hough_out_img[i*ntheta + j] = (pixel_t)(alpha*d_img_houghout[i*ntheta + j]); } } /********************************************************************/ __global__ void kernel3(pixel_t* d_hough_out_img, pixel_t* d_out_img) { float theta; int nrho = (int)sqrt((float)(HEIGHT*HEIGHT) + (float)(WIDTH*WIDTH)) + 1; int ntheta = 271; // -90 ~ 180 float rho = 0, rad = PI / 180; int thresh = 70; int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i<HEIGHT && j<WIDTH) { for (theta = -90; theta<180; theta++) { rho = i*cos(theta*rad) + j*sin(theta*rad); if (rho>0 && rho<nrho && d_hough_out_img[(int)rho* ntheta + (int)(theta + 90)]>thresh) { d_out_img[i*WIDTH + j] = 255; } } } } /****************************************************************/ __global__ void hysterisisThreshold(pixel_t *d_edgepoints, pixel_t *d_edges, pixel_t *d_visitedmap) { int col = blockIdx.x * 
blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (d_edgepoints[row* WIDTH + col] == 1) { d_edges[row* WIDTH + col] = 1; Travers(row, col, d_edgepoints, d_edges, d_visitedmap); d_visitedmap[row* WIDTH + col] = 1; } } /**********************************************************************/ __device__ void Travers(int row, int col, pixel_t *d_edgepoints, pixel_t * d_edges, pixel_t *d_visitedmap) { if (d_visitedmap[row * WIDTH + col] == 1) return; //1 if (d_edgepoints[(row + 1) * WIDTH + col] == 2) { d_edges[(row + 1) * WIDTH + col] = 1; d_visitedmap[(row + 1) * WIDTH + col] = 1; Travers(row + 1, col, d_edgepoints, d_edges, d_visitedmap); return; } //2 if (d_edgepoints[(row + 1) * WIDTH + col - 1] == 2) { d_edges[(row + 1) * WIDTH + col - 1] = 1; d_visitedmap[(row + 1) * WIDTH + col - 1] = 1; Travers(row + 1, col - 1, d_edgepoints, d_edges, d_visitedmap); return; } //3 if (d_edgepoints[(row)* WIDTH + col - 1] == 2) { d_edges[(row)* WIDTH + col - 1] = 1; d_visitedmap[(row)* WIDTH + col - 1] = 1; Travers(row, col - 1, d_edgepoints, d_edges, d_visitedmap); return; } //4 if (d_edgepoints[(row - 1) * WIDTH + col - 1] == 2) { d_edges[(row - 1) * WIDTH + col - 1] = 1; d_visitedmap[(row - 1) * WIDTH + col - 1] = 1; Travers(row - 1, col - 1, d_edgepoints, d_edges, d_visitedmap); return; } //5 if (d_edgepoints[(row - 1) * WIDTH + col] == 2) { d_edges[(row - 1) * WIDTH + col] = 1; d_visitedmap[(row - 1) * WIDTH + col] = 1; Travers(row - 1, col, d_edgepoints, d_edges, d_visitedmap); return; } //6 if (d_edgepoints[(row - 1) * WIDTH + col + 1] == 2) { d_edges[(row - 1) * WIDTH + col + 1] = 1; d_visitedmap[(row - 1) * WIDTH + col + 1] = 1; Travers(row - 1, col + 1, d_edgepoints, d_edges, d_visitedmap); return; } //7 if (d_edgepoints[(row)* WIDTH + col + 1] == 2) { d_edges[(row)* WIDTH + col + 1] = 1; d_visitedmap[(row)* WIDTH + col + 1] = 1; Travers(row, col + 1, d_edgepoints, d_edges, d_visitedmap); return; } //8 if (d_edgepoints[(row + 1) * WIDTH + col + 1] == 2) { d_edges[(row + 1) * WIDTH + col + 1] = 1; d_visitedmap[(row + 1) * WIDTH + col + 1] = 1; Travers(row + 1, col + 1, d_edgepoints, d_edges, d_visitedmap); return; } return; } /*********************************************************/ __global__ void findNonMax(pixel_t *d_nonMax, pixel_t *d_thisAngle, pixel_t *d_edgepoints, const int tmin, const int tmax) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; pixel_t thisAngle = d_thisAngle[row* WIDTH + col]; pixel_t nonMax = d_nonMax[row* WIDTH + col]; if (row >0 && col > 0 && row<HEIGHT && col<WIDTH) { //Horizontal Edge if (((-22.5 < thisAngle) && (thisAngle <= 22.5)) || ((157.5 < thisAngle) && (thisAngle <= -157.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[row* WIDTH + col + 1]) || (d_nonMax[row* WIDTH + col] < d_nonMax[row* WIDTH + col - 1])) nonMax = 0; } //Vertical Edge if (((-112.5 < thisAngle) && (thisAngle <= -67.5)) || ((67.5 < thisAngle) && (thisAngle <= 112.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[(row + 1)* WIDTH + col]) || (d_nonMax[row* WIDTH + col] < d_nonMax[(row - 1)* WIDTH + col])) nonMax = 0; } //+45 Degree Edge if (((-67.5 < thisAngle) && (thisAngle <= -22.5)) || ((112.5 < thisAngle) && (thisAngle <= 157.5))) { if ((d_nonMax[row* WIDTH + col] < d_nonMax[(row + 1)* WIDTH + col - 1]) || (d_nonMax[row* WIDTH + col] < d_nonMax[(row - 1)* WIDTH + col + 1])) nonMax = 0; } //-45 Degree Edge if (((-157.5 < thisAngle) && (thisAngle <= -112.5)) || ((67.5 < thisAngle) && (thisAngle <= 22.5))) { if 
((d_nonMax[row* WIDTH + col] < d_nonMax[(row + 1)* WIDTH + (col + 1)]) || (d_nonMax[row* WIDTH + col] < d_nonMax[(row - 1)* WIDTH + col - 1])) nonMax = 0; } d_nonMax[row* WIDTH + col] = nonMax; if (d_nonMax[row* WIDTH + col] > tmax) d_edgepoints[row* WIDTH + col] = 1; else if ((d_nonMax[row* WIDTH + col] < tmax) && (d_nonMax[row* WIDTH + col] > tmin)) d_edgepoints[row* WIDTH + col] = 2; else if (d_nonMax[row* WIDTH + col] < tmin) d_edgepoints[row* WIDTH + col] = 0; } } /*******************************************************/ __global__ void pixel_hypot(pixel_t *d_mGx, pixel_t *d_mGy, pixel_t *d_gradient, pixel_t *d_thisAngle) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; long int gradient = 0; if (row >0 && col > 0 && row<HEIGHT && col<WIDTH) { pixel_t pixel_gx, pixel_gy; pixel_gx = d_mGx[row* WIDTH + col]; pixel_gy = d_mGy[row* WIDTH + col]; gradient = (pixel_t)sqrt((float)((pixel_gx* pixel_gx) + (pixel_gy * pixel_gy))); if (gradient >= 255) d_gradient[row* WIDTH + col] = 255; else d_gradient[row* WIDTH + col] = (pixel_t)gradient; if (pixel_gx != 0) d_thisAngle[row* WIDTH + col] = (atan2f((float)pixel_gy, (float)pixel_gx) * 180) / PI; else if (pixel_gy != 0) d_thisAngle[row* WIDTH + col] = 90; } } /*******************************************************/ __host__ void gaussian_filter(float *gout, const float sigma) { int n = 7; //2 * (int)(2 * sigma) + 3; // size of gaussian mask matrix const float mean = (float)floor(n / 2.0); int i, j; /* The gMask size is calculated from the value of sigma(n= 2 * (int)(2 * sigma) + 3 ) However due to errors in unallocatiion of host memoery in gaussian_filter kernel , we fixed the value of sigma, hence n*/ for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { gout[i*n + j] = exp(-0.5 * (pow((double)(i - mean) / sigma, 2.0) + pow((double)(j - mean) / sigma, 2.0))) / (2 * PI * sigma * sigma); } } } /*******************************************************/ __global__ void convolution(pixel_t *cin, pixel_t *cout, float *mask, const int mSize) { int k = mSize / 2; int i, j; float mpix = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT) && (col >= k && col < WIDTH)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { mpix += cin[(row - i) * WIDTH + col - j] * (float)mask[(i + k) * mSize + (j + k)]; } } if (mpix < 0.0) mpix = 0.0; else if (mpix >255.0) mpix = 255.0; cout[row * WIDTH + col] = (pixel_t)mpix; } } /*****************************************************************/ __global__ void convolution1(pixel_t *cin, pixel_t *cout, pixel_t *mask, const int mSize) { int k = mSize / 2; int i, j; float mpix = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT) && (col >= k && col < WIDTH)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { mpix += cin[(row - i) * WIDTH + col - j] * (float)mask[(i + k) * mSize + (j + k)]; } } if (mpix < 0.0) mpix = 0.0; else if (mpix >255.0) mpix = 255.0; cout[row * WIDTH + col] = (pixel_t)mpix; } } ////////////////////////////////////////////////////////////////// __global__ void dilation(pixel_t *diin, pixel_t *diout) { int mSize = 5; int k = mSize / 2; int i, j; int count = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT-k) && (col >= k && col < WIDTH-k)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { if 
(diin[(row - i) * WIDTH + col - j] == 1) count += 1; } } if (count >= 1) diout[row * WIDTH + col] = 1; else diout[row * WIDTH + col] = 0; } } ////////////////////////////////////////////////////////////////// __global__ void erosion(pixel_t *erin, pixel_t *erout) { int mSize = 11; int k = mSize / 2; int i, j; int count = 0; int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if ((row >= k && row < HEIGHT-k) && (col >= k && col < WIDTH-k)) { for (i = -k; i <= k; i++) { for (j = -k; j <= k; j++) { if (erin[(row - i) * WIDTH + col - j] == 1) count += 1; } } if (count == mSize*mSize) erout[row * WIDTH + col] = 1; else erout[row * WIDTH + col] = 0; } } /**************************************************************/ int main(int argc, char *argv[]) { pixel_t *h_img_ip, *h_img_op; // input and output images in the host pixel_t *d_img_ip, *d_img_op; // input and output images in the device pixel_t *d_gradient, *h_gradient, *d_thisAngle, *h_thisAngle; int i, j; int dev_count; const float sigma = 1.4; float *h_gMask, *d_gMask; pixel_t *d_gGx, *h_gGx, *d_gGy, *h_gGy; pixel_t *d_mGx, *d_mGy; pixel_t *h_nonMax, *d_nonMax; pixel_t *h_edgepoints, *d_edgepoints; pixel_t *h_edges, *d_edges; pixel_t *d_visitedmap; int n = 7; //2 * (int)(2 * sigma) + 3; // size of gaussian mask matrix pixel_t h_mGx[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; pixel_t h_mGy[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } }; h_img_ip = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_img_op = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gGx = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gGy = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_thisAngle = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gradient = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_nonMax = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_edgepoints = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_edges = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_gMask = (float *)malloc(n * n * sizeof(float)); FILE *fp1, *fp2, *fp3, *fp4, *fp5, *fp6; cudaGetDeviceCount(&dev_count); printf("Dev count: %d\n", dev_count); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); cudaMalloc(&d_img_ip, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_img_op, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_gGx, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_gGy, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_gradient, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_thisAngle, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_nonMax, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_edges, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_visitedmap, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_mGx, 3 * 3 * sizeof(pixel_t)); cudaMalloc(&d_mGy, 3 * 3 * sizeof(pixel_t)); cudaMalloc(&d_gMask, n * n * sizeof(float)); fp1 = fopen("car1.txt", "r"); fp2 = fopen("cannyoutput.txt", "w"); for 
(i = 0; i < HEIGHT; i++) { for (j = 0; j < WIDTH; j++) { fscanf(fp1, "%d ", &h_img_ip[i*WIDTH + j]); } } printf("before gaussian filter\n"); gaussian_filter(h_gMask, sigma); cudaMemcpy(d_gMask, h_gMask, n * n* sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_mGx, h_mGx, 3 * 3 * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemcpy(d_mGy, h_mGy, 3 * 3 * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemcpy(d_img_ip, h_img_ip, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemset(d_img_op, 0, HEIGHT * WIDTH * sizeof(pixel_t)); const dim3 block(16, 16, 1); const dim3 grid((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y); printf("before canny filter\n"); convolution << <grid, block >> >(d_img_ip, d_img_op, d_gMask, n); cudaDeviceSynchronize(); cudaMemcpy(h_img_op, d_img_op, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_img_op, h_img_op, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); printf("after 1st convolution\n"); convolution1 << <grid, block >> >(d_img_op, d_gGx, d_mGx, 3); cudaDeviceSynchronize(); cudaMemcpy(h_gGx, d_gGx, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_gGx, h_gGx, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); convolution1 << <grid, block >> >(d_img_op, d_gGy, d_mGy, 3); cudaDeviceSynchronize(); cudaMemcpy(h_gGy, d_gGy, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_gGy, h_gGy, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemset(d_gradient, 0, HEIGHT * WIDTH * sizeof(pixel_t)); pixel_hypot << <grid, block >> >(d_gGx, d_gGy, d_gradient, d_thisAngle); cudaDeviceSynchronize(); cudaMemcpy(h_gradient, d_gradient, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_gradient, h_gradient, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemcpy(h_thisAngle, d_thisAngle, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_thisAngle, h_thisAngle, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemcpy(d_nonMax, h_gradient, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); // for non maximal supression cudaMemset(d_edgepoints, 0, HEIGHT * WIDTH * sizeof(pixel_t)); findNonMax << <grid, block >> >(d_nonMax, d_thisAngle, d_edgepoints, 15, 20); cudaDeviceSynchronize(); cudaMemcpy(h_nonMax, d_nonMax, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(h_edgepoints, d_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_edgepoints, h_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemcpy(h_edgepoints, d_edgepoints, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemset(d_edges, 0, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMemset(d_visitedmap, 0, HEIGHT * WIDTH * sizeof(pixel_t)); hysterisisThreshold << <grid, block >> >(d_edgepoints, d_edges, d_visitedmap); cudaDeviceSynchronize(); cudaMemcpy(h_edges, d_edges, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyDeviceToHost); printf(" kernels executed\n"); for (i = 0; i < HEIGHT; i++) { for (j = 0; j < WIDTH; j++) { fprintf(fp2, "%d\t", h_edges[i*WIDTH + j]); } fprintf(fp2, "\n"); } cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaFree(d_img_ip); cudaFree(h_img_ip); cudaFree(d_img_op); cudaFree(h_img_op); cudaFree(d_gradient); cudaFree(h_gradient); cudaFree(h_thisAngle); cudaFree(d_thisAngle); cudaFree(d_mGx); cudaFree(d_mGy); cudaFree(h_nonMax); cudaFree(d_nonMax); 
cudaFree(h_edgepoints); cudaFree(d_edgepoints); cudaFree(d_visitedmap); fclose(fp1); fclose(fp2); /*******************************************************************/ pixel_t *h_hough_out_img, *d_hough_out_img; pixel_t *h_img_houghout, *d_img_houghout; //just for h_hough_out_img output without alpha multiplication pixel_t *h_out_img, *d_out_img; //display image int *d_max, *h_max; int nrho, ntheta; nrho = (int)sqrt(HEIGHT*HEIGHT + WIDTH*WIDTH) + 1; ntheta = 271; // -90 ~ 18 h_out_img = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); h_img_houghout = (pixel_t *)malloc(nrho*ntheta*sizeof(pixel_t)); h_hough_out_img = (pixel_t *)malloc(nrho*ntheta*sizeof(pixel_t)); h_max = (int *)malloc(sizeof(int)); fp3 = fopen("houghoutput.txt", "w"); fp4 = fopen("output.txt", "w"); memset(h_hough_out_img, 0, ntheta * nrho * sizeof(pixel_t)); memset(h_out_img, 0, HEIGHT*WIDTH*sizeof(pixel_t)); //cudaMalloc(&d_image_cannyout, HEIGHT * WIDTH * sizeof(int)); cudaMalloc(&d_img_houghout, ntheta * nrho * sizeof(pixel_t)); cudaMalloc(&d_hough_out_img, ntheta * nrho * sizeof(pixel_t)); cudaMalloc(&d_max, sizeof(int)); cudaMalloc(&d_out_img, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMemset(d_img_houghout, 0, ntheta * nrho * sizeof(pixel_t)); cudaMemset(d_hough_out_img, 0, ntheta * nrho * sizeof(pixel_t)); cudaMemset(d_out_img, 0, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMemcpy(d_edges, h_edges, HEIGHT * WIDTH * sizeof(pixel_t), cudaMemcpyHostToDevice); dim3 grid1((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y); houghlines << <grid1, block >> >(d_edges, d_hough_out_img, d_img_houghout, d_max, d_out_img); // check the number of threds and blocks cudaDeviceSynchronize(); cudaMemcpy(h_img_houghout, d_img_houghout, nrho * ntheta* sizeof(pixel_t), cudaMemcpyDeviceToHost); cudaMemcpy(d_img_houghout, h_img_houghout, nrho * ntheta* sizeof(pixel_t), cudaMemcpyHostToDevice); cudaMemcpy(h_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(d_max, h_max, sizeof(int), cudaMemcpyHostToDevice); dim3 grid2(sqrt((float)(HEIGHT*HEIGHT + WIDTH*WIDTH)) + 1, 271); printf("BEFORE KERNEL2\n"); kernel2 << <grid2, block >> >(d_img_houghout, d_hough_out_img, d_max); printf("AFTER KERNEL2\n"); cudaDeviceSynchronize(); dim3 grid3((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y); printf("BEFORE KERNEL3\n"); kernel3 << <grid3, block >> >(d_hough_out_img, d_out_img); printf("AFTER KERNEL3\n"); cudaDeviceSynchronize(); cudaMemcpy(h_out_img, d_out_img, HEIGHT*WIDTH*sizeof(pixel_t), cudaMemcpyDeviceToHost); printf("THe max value is:%d\n", *h_max); cudaMemcpy(h_hough_out_img, d_hough_out_img, nrho * ntheta* sizeof(pixel_t), cudaMemcpyDeviceToHost); for (i = 0; i<nrho; i++) { for (j = 0; j<ntheta; j++) { fprintf(fp3, "%d\t", h_hough_out_img[i*ntheta + j]); } fprintf(fp3, "\n"); } for (i = 0; i<HEIGHT; i++) { for (j = 0; j<WIDTH; j++) { fprintf(fp4, "%d\t", h_out_img[i*WIDTH + j]); } fprintf(fp4, "\n"); } printf("$$$nrho value is:%d$$$\n", nrho); cudaFree(d_img_houghout); cudaFree(d_max); fclose(fp3); fclose(fp4); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// pixel_t *h_dilout, *d_dilin, *d_dilout; h_dilout = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); fp5 = fopen("dilout.txt", "w"); memset(h_dilout, 0, HEIGHT*WIDTH*sizeof(pixel_t)); cudaMalloc(&d_dilin, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMalloc(&d_dilout, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMemset(d_dilout, 0, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMemcpy(d_dilin, h_edges, 
sizeof(pixel_t), cudaMemcpyHostToDevice); dilation <<<grid, block >>>(d_edges, d_dilout); cudaDeviceSynchronize(); printf("AFTER DILATION\n"); cudaMemcpy(h_dilout, d_dilout, HEIGHT*WIDTH*sizeof(pixel_t), cudaMemcpyDeviceToHost); for (i = 0; i<HEIGHT; i++) { for (j = 0; j<WIDTH; j++) { fprintf(fp5, "%d\t", h_dilout[i*WIDTH + j]); } fprintf(fp5, "\n"); } /////////////////////////////////////////////////////////////////////// pixel_t *h_morout, *d_morout; h_morout = (pixel_t *)malloc(HEIGHT * WIDTH *sizeof(pixel_t)); fp6 = fopen("morout.txt", "w"); memset(h_morout, 0, HEIGHT*WIDTH*sizeof(pixel_t)); cudaMalloc(&d_morout, HEIGHT * WIDTH * sizeof(pixel_t)); cudaMemset(d_morout, 0, HEIGHT * WIDTH * sizeof(pixel_t)); dilation <<<grid, block >>>(d_dilout, d_morout); cudaDeviceSynchronize(); printf("AFTER DILATION\n"); cudaMemcpy(h_morout, d_morout, HEIGHT*WIDTH*sizeof(pixel_t), cudaMemcpyDeviceToHost); for (i = 0; i<HEIGHT; i++) { for (j = 0; j<WIDTH; j++) { fprintf(fp6, "%d\t", h_morout[i*WIDTH + j]); } fprintf(fp6, "\n"); } return 0; }
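The Canny/Hough pair above illustrates the mechanical rewrites hipify applies: the CUDA runtime calls (cudaMalloc, cudaMemcpy, cudaMemset, cudaDeviceSynchronize, cudaGetLastError) become their hip* counterparts, cudaDeviceProp becomes hipDeviceProp_t, and every triple-chevron launch such as dilation<<<grid, block>>>(...) is rewritten to hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. A minimal sketch of that launch mapping follows; the scale kernel and its sizes are hypothetical and are not part of the files above.

#include <hip/hip_runtime.h>

// Hypothetical kernel used only to illustrate the hipify launch rewrite.
__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1 << 20;                  // assumed problem size
    float *d_x = NULL;
    hipMalloc(&d_x, n * sizeof(float));     // was: cudaMalloc
    hipMemset(d_x, 0, n * sizeof(float));   // was: cudaMemset
    dim3 block(256, 1, 1);
    dim3 grid((n + block.x - 1) / block.x, 1, 1);
    // CUDA source:   scale<<<grid, block>>>(d_x, 2.0f, n);
    // hipify output: kernel name first, then grid, block, dynamic shared bytes, stream, arguments
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, 2.0f, n);
    hipDeviceSynchronize();                 // was: cudaDeviceSynchronize
    hipFree(d_x);                           // was: cudaFree
    return 0;
}

Note that the kernel and __device__ function bodies are left untouched by the conversion; only host-side API names, headers, and the launch syntax change, which accounts for every difference between the .hip and .cu files in this pair.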
48e8774e45cd1ef07ab7007e59ff39cb4f38f942.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @author Mark Gates @author Azzam Haidar @generated from zlacpy.cu normal z -> d, Fri Sep 11 18:29:20 2015 */ #include "common_magma.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. */ static __device__ void dlacpy_full_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /* Similar to dlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlacpy_lower_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /* Similar to dlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. 
*/ static __device__ void dlacpy_upper_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions. */ __global__ void dlacpy_full_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_full_device(m, n, dA, ldda, dB, lddb); } __global__ void dlacpy_lower_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_lower_device(m, n, dA, ldda, dB, lddb); } __global__ void dlacpy_upper_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_upper_device(m, n, dA, ldda, dB, lddb); } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions for the batched routine. */ __global__ void dlacpy_full_kernel_batched( int m, int n, double const * const *dAarray, int ldda, double **dBarray, int lddb ) { int batchid = blockIdx.z; dlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb); } __global__ void dlacpy_lower_kernel_batched( int m, int n, double const * const *dAarray, int ldda, double **dBarray, int lddb ) { int batchid = blockIdx.z; dlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb); } __global__ void dlacpy_upper_kernel_batched( int m, int n, double const * const *dAarray, int ldda, double **dBarray, int lddb ) { int batchid = blockIdx.z; dlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb); } ////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DLACPY_Q copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as DLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[out] dB DOUBLE_PRECISION array, dimension (LDDB,N) The M-by-N matrix dB. On exit, dB = dA in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlacpy_q( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if ( uplo == MagmaLower ) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } else { // off diagonal super block hipLaunchKernelGGL(( dlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } } else if ( uplo == MagmaUpper ) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block hipLaunchKernelGGL(( dlacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } else { // off diagonal super block hipLaunchKernelGGL(( dlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } } else { // TODO: use hipMemcpy or hipMemcpy2D ? for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); hipLaunchKernelGGL(( dlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue , mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } } /** @see magmablas_dlacpy_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlacpy( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dB, magma_int_t lddb ) { magmablas_dlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream ); } //////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DLACPY_BATCHED copies all or part of each two-dimensional matrix dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount. 
Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of each matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part Otherwise: All of each matrix dA @param[in] m INTEGER The number of rows of each matrix dA. M >= 0. @param[in] n INTEGER The number of columns of each matrix dA. N >= 0. @param[in] dAarray DOUBLE_PRECISION* array, dimension (batchCount) Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N). The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of each array dA. LDDA >= max(1,M). @param[out] dBarray DOUBLE_PRECISION* array, dimension (batchCount) Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N). The M-by-N matrix dB. On exit, dB = dA in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of each array dB. LDDB >= max(1,M). @param[in] batchCount Number of matrices in dAarray and dBarray. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlacpy_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( dlacpy_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb ); } else if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( dlacpy_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb ); } else { hipLaunchKernelGGL(( dlacpy_full_kernel_batched) , dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb ); } }
48e8774e45cd1ef07ab7007e59ff39cb4f38f942.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @author Mark Gates @author Azzam Haidar @generated from zlacpy.cu normal z -> d, Fri Sep 11 18:29:20 2015 */ #include "common_magma.h" // To deal with really large matrices, this launchs multiple super blocks, // each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64. // CUDA architecture 2.0 limits each grid dimension to 64K-1. // Instances arose for vectors used by sparse matrices with M > 4194240, though N is small. const magma_int_t max_blocks = 65535; // BLK_X and BLK_Y need to be equal for dlaset_q to deal with diag & offdiag // when looping over super blocks. // Formerly, BLK_X and BLK_Y could be different. #define BLK_X 64 #define BLK_Y BLK_X /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to dlaset, dlacpy, dlag2s, clag2z, dgeadd. */ static __device__ void dlacpy_full_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /* Similar to dlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlacpy_lower_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < m && ind + BLK_X > iby ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /* Similar to dlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to dlaset, dlacpy, zlat2c, clat2z. */ static __device__ void dlacpy_upper_device( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions. 
*/ __global__ void dlacpy_full_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_full_device(m, n, dA, ldda, dB, lddb); } __global__ void dlacpy_lower_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_lower_device(m, n, dA, ldda, dB, lddb); } __global__ void dlacpy_upper_kernel( int m, int n, const double *dA, int ldda, double *dB, int lddb ) { dlacpy_upper_device(m, n, dA, ldda, dB, lddb); } ////////////////////////////////////////////////////////////////////////////////////// /* kernel wrappers to call the device functions for the batched routine. */ __global__ void dlacpy_full_kernel_batched( int m, int n, double const * const *dAarray, int ldda, double **dBarray, int lddb ) { int batchid = blockIdx.z; dlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb); } __global__ void dlacpy_lower_kernel_batched( int m, int n, double const * const *dAarray, int ldda, double **dBarray, int lddb ) { int batchid = blockIdx.z; dlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb); } __global__ void dlacpy_upper_kernel_batched( int m, int n, double const * const *dAarray, int ldda, double **dBarray, int lddb ) { int batchid = blockIdx.z; dlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb); } ////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DLACPY_Q copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as DLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[out] dB DOUBLE_PRECISION array, dimension (LDDB,N) The M-by-N matrix dB. On exit, dB = dA in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlacpy_q( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dB, magma_int_t lddb, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) #define dB(i_, j_) (dB + (i_) + (j_)*lddb) magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } assert( BLK_X == BLK_Y ); const magma_int_t super_NB = max_blocks*BLK_X; dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) ); dim3 threads( BLK_X, 1 ); dim3 grid; magma_int_t mm, nn; if ( uplo == MagmaLower ) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? 
m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlacpy_lower_kernel<<< grid, threads, 0, queue >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } else { // off diagonal super block dlacpy_full_kernel <<< grid, threads, 0, queue >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } } else if ( uplo == MagmaUpper ) { for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); if ( i == j ) { // diagonal super block dlacpy_upper_kernel<<< grid, threads, 0, queue >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } else { // off diagonal super block dlacpy_full_kernel <<< grid, threads, 0, queue >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } } else { // TODO: use cudaMemcpy or cudaMemcpy2D ? for( unsigned int i=0; i < super_grid.x; ++i ) { mm = (i == super_grid.x-1 ? m % super_NB : super_NB); grid.x = magma_ceildiv( mm, BLK_X ); for( unsigned int j=0; j < super_grid.y; ++j ) { // full row nn = (j == super_grid.y-1 ? n % super_NB : super_NB); grid.y = magma_ceildiv( nn, BLK_Y ); dlacpy_full_kernel <<< grid, threads, 0, queue >>> ( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb ); } } } } /** @see magmablas_dlacpy_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlacpy( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dB, magma_int_t lddb ) { magmablas_dlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream ); } //////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- DLACPY_BATCHED copies all or part of each two-dimensional matrix dAarray[i] to matrix dBarray[i], for 0 <= i < batchcount. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of each matrix dA to be copied to dB. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part Otherwise: All of each matrix dA @param[in] m INTEGER The number of rows of each matrix dA. M >= 0. @param[in] n INTEGER The number of columns of each matrix dA. N >= 0. @param[in] dAarray DOUBLE_PRECISION* array, dimension (batchCount) Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N). The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of each array dA. LDDA >= max(1,M). @param[out] dBarray DOUBLE_PRECISION* array, dimension (batchCount) Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N). The M-by-N matrix dB. On exit, dB = dA in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of each array dB. LDDB >= max(1,M). @param[in] batchCount Number of matrices in dAarray and dBarray. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlacpy_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magmaDouble_const_ptr const dAarray[], magma_int_t ldda, magmaDouble_ptr dBarray[], magma_int_t lddb, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) { return; } dim3 threads( BLK_X, 1, 1 ); dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount ); if ( uplo == MagmaLower ) { dlacpy_lower_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb ); } else if ( uplo == MagmaUpper ) { dlacpy_upper_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb ); } else { dlacpy_full_kernel_batched <<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb ); } }
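The magmablas_dlacpy pair above tiles very large copies into "super blocks" so that no single launch exceeds the 64K-1 grid-dimension limit of compute capability 2.x. Stripped of the MAGMA queue/uplo machinery, the core full-copy pattern looks roughly like the following plain-CUDA sketch (column-major layout as in MAGMA; BLK, ceildiv and the wrapper name are illustrative, not MAGMA's actual code):

#include <cuda_runtime.h>

#define BLK 64

// One block covers BLK rows x BLK columns; each thread walks one row of its tile.
__global__ void lacpy_full(int m, int n, const double *dA, int ldda,
                           double *dB, int lddb)
{
    int i  = blockIdx.x * BLK + threadIdx.x;   // global row index
    int j0 = blockIdx.y * BLK;                 // first column of this tile
    if (i < m) {
        int jend = min(j0 + BLK, n);
        for (int j = j0; j < jend; ++j)
            dB[i + j * lddb] = dA[i + j * ldda];
    }
}

static inline int ceildiv(int a, int b) { return (a + b - 1) / b; }

void lacpy_full_launch(int m, int n, const double *dA, int ldda,
                       double *dB, int lddb, cudaStream_t stream)
{
    dim3 threads(BLK, 1);
    dim3 grid(ceildiv(m, BLK), ceildiv(n, BLK));
    lacpy_full<<<grid, threads, 0, stream>>>(m, n, dA, ldda, dB, lddb);
}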
e45f7e48a0785d9f376889ff58868d947fc04eae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright [2019] [Christopher Syben] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Voxel-driven parllel-beam projector CUDA kernel * Implementation partially adapted from CONRAD * PYRO-NN is developed as an Open Source project under the Apache License, * Version 2.0. */ #include "helper_headers/helper_grid.h" #include "helper_headers/helper_math.h" texture<float, hipTextureType2D, hipReadModeElementType> volume_as_texture; #define CUDART_INF_F __int_as_float(0x7f800000) inline __device__ float kernel_project2D(const float2 source_point, const float2 ray_vector, const float step_size, const int2 volume_size, const float2 volume_origin, const float2 volume_spacing) { unsigned int detector_idx = blockIdx.x * blockDim.x + threadIdx.x; int projection_idx = blockIdx.y; float pixel = 0.0f; // Step 1: compute alpha value at entry and exit point of the volume float min_alpha, max_alpha; min_alpha = 0; max_alpha = CUDART_INF_F; if (0.0f != ray_vector.x) { float volume_min_edge_point = index_to_physical(0, volume_origin.x, volume_spacing.x) - 0.5f; float volume_max_edge_point = index_to_physical(volume_size.x, volume_origin.x, volume_spacing.x) - 0.5f; float reci = 1.0f / ray_vector.x; float alpha0 = (volume_min_edge_point - source_point.x) * reci; float alpha1 = (volume_max_edge_point - source_point.x) * reci; min_alpha = fmin(alpha0, alpha1); max_alpha = fmax(alpha0, alpha1); } if (0.0f != ray_vector.y) { float volume_min_edge_point = index_to_physical(0, volume_origin.y, volume_spacing.y) - 0.5f; float volume_max_edge_point = index_to_physical(volume_size.y, volume_origin.y, volume_spacing.y) - 0.5f; float reci = 1.0f / ray_vector.y; float alpha0 = (volume_min_edge_point - source_point.y) * reci; float alpha1 = (volume_max_edge_point - source_point.y) * reci; min_alpha = fmax(min_alpha, fmin(alpha0, alpha1)); max_alpha = fmin(max_alpha, fmax(alpha0, alpha1)); } float px, py; // pixel = source_point.x + min_alpha * ray_vector.x; // Entrance boundary // In CUDA, voxel centers are located at (xx.5, xx.5, xx.5), // whereas, SwVolume has voxel centers at integers. // For the initial interpolated value, only a half stepsize is // considered in the computation. 
if (min_alpha < max_alpha) { px = source_point.x + min_alpha * ray_vector.x; py = source_point.y + min_alpha * ray_vector.y; pixel += 0.5f * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); min_alpha += step_size; } // Mid segments while (min_alpha < max_alpha) { px = source_point.x + min_alpha * ray_vector.x; py = source_point.y + min_alpha * ray_vector.y; float2 interp_point = physical_to_index(make_float2(px, py), volume_origin, volume_spacing); pixel += tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); min_alpha += step_size; } // Scaling by stepsize; pixel *= step_size; // Last segment of the line if (pixel > 0.0f) { pixel -= 0.5f * step_size * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); min_alpha -= step_size; float last_step_size = max_alpha - min_alpha; pixel += 0.5f * last_step_size * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); px = source_point.x + max_alpha * ray_vector.x; py = source_point.y + max_alpha * ray_vector.y; // The last segment of the line integral takes care of the // varying length. pixel += 0.5f * last_step_size * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); } return pixel; } __global__ void project_2Dpar_beam_kernel( float *pSinogram, const float2 *d_rays, const int number_of_projections, const float sampling_step_size, const int2 volume_size, const float2 volume_spacing, const float2 volume_origin, const int detector_size, const float detector_spacing, const float detector_origin) { unsigned int detector_idx = blockIdx.x * blockDim.x + threadIdx.x; if (detector_idx >= detector_size) { return; } // Preparations: // Assume a source isocenter distance to compute the start of the ray, // although sid is not neseccary for a par beam geometry float sid = sqrt((float)(volume_size.x * volume_spacing.x * volume_size.x * volume_spacing.x) + (volume_size.y * volume_spacing.y * volume_size.y * volume_spacing.y)) * 1.2f; int projection_idx = blockIdx.y; float2 ray_vector = d_rays[projection_idx]; // create detector coordinate system (u,v) w.r.t the ray float2 u_vec = make_float2(-ray_vector.y, ray_vector.x); // calculate physical coordinate of detector pixel float u = index_to_physical(detector_idx, detector_origin, detector_spacing); // Calculate "source"-Point (start point for the parallel ray), so we can use // the projection kernel Assume a source isocenter distance to compute the // start of the ray, although sid is not neseccary for a par beam geometry float2 virtual_source_point = ray_vector * (-sid) + u_vec * u; float pixel = kernel_project2D( virtual_source_point, ray_vector, sampling_step_size, // * fmin(volume_spacing.x, volume_spacing.y), volume_size, volume_origin, volume_spacing); pixel *= sqrt( (ray_vector.x * volume_spacing.x) * (ray_vector.x * volume_spacing.x) + (ray_vector.y * volume_spacing.y) * (ray_vector.y * volume_spacing.y)); unsigned sinogram_idx = projection_idx * detector_size + detector_idx; pSinogram[sinogram_idx] = pixel; return; } void Parallel_Projection2D_Kernel_Launcher( const float *volume_ptr, float 
*out, const float *ray_vectors, const int number_of_projections, const int volume_width, const int volume_height, const float volume_spacing_x, const float volume_spacing_y, const float volume_origin_x, const float volume_origin_y, const int detector_size, const float detector_spacing, const float detector_origin) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); volume_as_texture.addressMode[0] = hipAddressModeBorder; volume_as_texture.addressMode[1] = hipAddressModeBorder; volume_as_texture.filterMode = hipFilterModeLinear; volume_as_texture.normalized = false; hipArray *volume_array; hipMallocArray(&volume_array, &channelDesc, volume_width, volume_height); hipMemcpyToArray(volume_array, 0, 0, volume_ptr, volume_width * volume_height * sizeof(float), hipMemcpyHostToDevice); hipBindTextureToArray(volume_as_texture, volume_array, channelDesc); auto ray_size_b = number_of_projections * sizeof(float2); float2 *d_rays; hipMalloc(&d_rays, ray_size_b); hipMemcpy(d_rays, ray_vectors, ray_size_b, hipMemcpyHostToDevice); float sampling_step_size = 0.2; int2 volume_size = make_int2(volume_width, volume_height); float2 volume_spacing = make_float2(volume_spacing_x, volume_spacing_y); float2 volume_origin = make_float2(volume_origin_x, volume_origin_y); const unsigned blocksize = 256; const dim3 gridsize = dim3((detector_size / blocksize) + 1, number_of_projections); hipLaunchKernelGGL(( project_2Dpar_beam_kernel), dim3(gridsize), dim3(blocksize), 0, 0, out, d_rays, number_of_projections, sampling_step_size, volume_size, volume_spacing, volume_origin, detector_size, detector_spacing, detector_origin); // cleanup hipUnbindTexture(volume_as_texture); hipFreeArray(volume_array); hipFree(d_rays); }
e45f7e48a0785d9f376889ff58868d947fc04eae.cu
/* * Copyright [2019] [Christopher Syben] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Voxel-driven parllel-beam projector CUDA kernel * Implementation partially adapted from CONRAD * PYRO-NN is developed as an Open Source project under the Apache License, * Version 2.0. */ #include "helper_headers/helper_grid.h" #include "helper_headers/helper_math.h" texture<float, cudaTextureType2D, cudaReadModeElementType> volume_as_texture; #define CUDART_INF_F __int_as_float(0x7f800000) inline __device__ float kernel_project2D(const float2 source_point, const float2 ray_vector, const float step_size, const int2 volume_size, const float2 volume_origin, const float2 volume_spacing) { unsigned int detector_idx = blockIdx.x * blockDim.x + threadIdx.x; int projection_idx = blockIdx.y; float pixel = 0.0f; // Step 1: compute alpha value at entry and exit point of the volume float min_alpha, max_alpha; min_alpha = 0; max_alpha = CUDART_INF_F; if (0.0f != ray_vector.x) { float volume_min_edge_point = index_to_physical(0, volume_origin.x, volume_spacing.x) - 0.5f; float volume_max_edge_point = index_to_physical(volume_size.x, volume_origin.x, volume_spacing.x) - 0.5f; float reci = 1.0f / ray_vector.x; float alpha0 = (volume_min_edge_point - source_point.x) * reci; float alpha1 = (volume_max_edge_point - source_point.x) * reci; min_alpha = fmin(alpha0, alpha1); max_alpha = fmax(alpha0, alpha1); } if (0.0f != ray_vector.y) { float volume_min_edge_point = index_to_physical(0, volume_origin.y, volume_spacing.y) - 0.5f; float volume_max_edge_point = index_to_physical(volume_size.y, volume_origin.y, volume_spacing.y) - 0.5f; float reci = 1.0f / ray_vector.y; float alpha0 = (volume_min_edge_point - source_point.y) * reci; float alpha1 = (volume_max_edge_point - source_point.y) * reci; min_alpha = fmax(min_alpha, fmin(alpha0, alpha1)); max_alpha = fmin(max_alpha, fmax(alpha0, alpha1)); } float px, py; // pixel = source_point.x + min_alpha * ray_vector.x; // Entrance boundary // In CUDA, voxel centers are located at (xx.5, xx.5, xx.5), // whereas, SwVolume has voxel centers at integers. // For the initial interpolated value, only a half stepsize is // considered in the computation. 
if (min_alpha < max_alpha) { px = source_point.x + min_alpha * ray_vector.x; py = source_point.y + min_alpha * ray_vector.y; pixel += 0.5f * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); min_alpha += step_size; } // Mid segments while (min_alpha < max_alpha) { px = source_point.x + min_alpha * ray_vector.x; py = source_point.y + min_alpha * ray_vector.y; float2 interp_point = physical_to_index(make_float2(px, py), volume_origin, volume_spacing); pixel += tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); min_alpha += step_size; } // Scaling by stepsize; pixel *= step_size; // Last segment of the line if (pixel > 0.0f) { pixel -= 0.5f * step_size * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); min_alpha -= step_size; float last_step_size = max_alpha - min_alpha; pixel += 0.5f * last_step_size * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); px = source_point.x + max_alpha * ray_vector.x; py = source_point.y + max_alpha * ray_vector.y; // The last segment of the line integral takes care of the // varying length. pixel += 0.5f * last_step_size * tex2D(volume_as_texture, physical_to_index(px, volume_origin.x, volume_spacing.x) + 0.5f, physical_to_index(py, volume_origin.y, volume_spacing.y) + 0.5f); } return pixel; } __global__ void project_2Dpar_beam_kernel( float *pSinogram, const float2 *d_rays, const int number_of_projections, const float sampling_step_size, const int2 volume_size, const float2 volume_spacing, const float2 volume_origin, const int detector_size, const float detector_spacing, const float detector_origin) { unsigned int detector_idx = blockIdx.x * blockDim.x + threadIdx.x; if (detector_idx >= detector_size) { return; } // Preparations: // Assume a source isocenter distance to compute the start of the ray, // although sid is not neseccary for a par beam geometry float sid = sqrt((float)(volume_size.x * volume_spacing.x * volume_size.x * volume_spacing.x) + (volume_size.y * volume_spacing.y * volume_size.y * volume_spacing.y)) * 1.2f; int projection_idx = blockIdx.y; float2 ray_vector = d_rays[projection_idx]; // create detector coordinate system (u,v) w.r.t the ray float2 u_vec = make_float2(-ray_vector.y, ray_vector.x); // calculate physical coordinate of detector pixel float u = index_to_physical(detector_idx, detector_origin, detector_spacing); // Calculate "source"-Point (start point for the parallel ray), so we can use // the projection kernel Assume a source isocenter distance to compute the // start of the ray, although sid is not neseccary for a par beam geometry float2 virtual_source_point = ray_vector * (-sid) + u_vec * u; float pixel = kernel_project2D( virtual_source_point, ray_vector, sampling_step_size, // * fmin(volume_spacing.x, volume_spacing.y), volume_size, volume_origin, volume_spacing); pixel *= sqrt( (ray_vector.x * volume_spacing.x) * (ray_vector.x * volume_spacing.x) + (ray_vector.y * volume_spacing.y) * (ray_vector.y * volume_spacing.y)); unsigned sinogram_idx = projection_idx * detector_size + detector_idx; pSinogram[sinogram_idx] = pixel; return; } void Parallel_Projection2D_Kernel_Launcher( const float *volume_ptr, float 
*out, const float *ray_vectors, const int number_of_projections, const int volume_width, const int volume_height, const float volume_spacing_x, const float volume_spacing_y, const float volume_origin_x, const float volume_origin_y, const int detector_size, const float detector_spacing, const float detector_origin) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); volume_as_texture.addressMode[0] = cudaAddressModeBorder; volume_as_texture.addressMode[1] = cudaAddressModeBorder; volume_as_texture.filterMode = cudaFilterModeLinear; volume_as_texture.normalized = false; cudaArray *volume_array; cudaMallocArray(&volume_array, &channelDesc, volume_width, volume_height); cudaMemcpyToArray(volume_array, 0, 0, volume_ptr, volume_width * volume_height * sizeof(float), cudaMemcpyHostToDevice); cudaBindTextureToArray(volume_as_texture, volume_array, channelDesc); auto ray_size_b = number_of_projections * sizeof(float2); float2 *d_rays; cudaMalloc(&d_rays, ray_size_b); cudaMemcpy(d_rays, ray_vectors, ray_size_b, cudaMemcpyHostToDevice); float sampling_step_size = 0.2; int2 volume_size = make_int2(volume_width, volume_height); float2 volume_spacing = make_float2(volume_spacing_x, volume_spacing_y); float2 volume_origin = make_float2(volume_origin_x, volume_origin_y); const unsigned blocksize = 256; const dim3 gridsize = dim3((detector_size / blocksize) + 1, number_of_projections); project_2Dpar_beam_kernel<<<gridsize, blocksize>>>( out, d_rays, number_of_projections, sampling_step_size, volume_size, volume_spacing, volume_origin, detector_size, detector_spacing, detector_origin); // cleanup cudaUnbindTexture(volume_as_texture); cudaFreeArray(volume_array); cudaFree(d_rays); }
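Both versions of the projector above sample the volume through a global texture<float, cudaTextureType2D> reference bound with cudaBindTextureToArray / hipBindTextureToArray. That texture-reference API is deprecated and has been removed in recent CUDA releases; the texture-object API is the usual replacement. A minimal sketch of building an equivalent 2D texture object (border addressing, linear filtering, unnormalized coordinates, host-side volume as in the launcher above) — illustrative only, not part of the original files:

#include <cuda_runtime.h>

cudaTextureObject_t make_volume_texture(const float *h_volume, int width, int height,
                                        cudaArray_t *out_array)
{
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaMallocArray(out_array, &desc, width, height);
    cudaMemcpy2DToArray(*out_array, 0, 0, h_volume, width * sizeof(float),
                        width * sizeof(float), height, cudaMemcpyHostToDevice);

    cudaResourceDesc res = {};
    res.resType = cudaResourceTypeArray;
    res.res.array.array = *out_array;

    cudaTextureDesc tex = {};
    tex.addressMode[0]   = cudaAddressModeBorder;
    tex.addressMode[1]   = cudaAddressModeBorder;
    tex.filterMode       = cudaFilterModeLinear;
    tex.readMode         = cudaReadModeElementType;
    tex.normalizedCoords = 0;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &res, &tex, nullptr);
    return texObj;   // kernels receive texObj and sample with tex2D<float>(texObj, x, y)
}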
b597ef6e4106b99aee4afd65d1a36f7ee16fda39.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * UtilityFunctions.cu
 *
 *  Created on: 17/03/2016
 *      Author: vincentvillani
 */

#include "../Header/UtilityFunctions.h"
#include "../Header/RFIMHelperFunctions.h"
#include "../Header/CudaUtilityFunctions.h"

#include <rocblas.h>

//Write a host signal matrix to a file
void Utility_WriteSignalMatrixToFile(const std::string filename, float* h_rowMajorSignalMatrix, uint64_t rows, uint64_t columns)
{
    FILE* signalFile = fopen(filename.c_str(), "w");

    if(signalFile == NULL)
    {
        fprintf(stderr, "WriteSignalMatrixToFile: failed to open %s file\n", filename.c_str());
        //exit(1);
    }

    for(uint32_t currentRow = 0; currentRow < rows; ++currentRow)
    {
        for(uint32_t currentCol = 0; currentCol < columns; ++currentCol)
        {
            //If last item in the column, write it without the " "
            if(currentCol == columns - 1)
                fprintf(signalFile, "%f", h_rowMajorSignalMatrix[currentRow * columns + currentCol] );
            else
                fprintf(signalFile, "%f ", h_rowMajorSignalMatrix[currentRow * columns + currentCol] );
        }

        //Print a newline for each row except the last one
        if(currentRow != rows - 1)
            fprintf(signalFile, "\n");
    }

    fclose(signalFile);
}

void Utility_DeviceWriteSignalMatrixToFile(const std::string filename, float* d_rowMajorSignalMatrix, uint64_t rows, uint64_t columns, bool transpose)
{
    uint32_t matrixByteSize = sizeof(float) * rows * columns;

    //Allocate a host buffer to copy the device matrix into
    float* h_rowMajorSignalMatrix = (float*)malloc(matrixByteSize);

    float* d_transposedMatrix = d_rowMajorSignalMatrix;

    hipblasHandle_t cublasHandle;

    if(transpose)
    {
        hipblasCreate(&cublasHandle);
        hipMalloc(&d_transposedMatrix, matrixByteSize);

        //Transpose the matrix
        Device_MatrixTranspose(&cublasHandle, d_rowMajorSignalMatrix, d_transposedMatrix, rows, columns);
    }

    CudaUtility_CopySignalToHost(d_transposedMatrix, &h_rowMajorSignalMatrix, sizeof(float) * rows * columns);

    //Call the host version of this function
    Utility_WriteSignalMatrixToFile(filename, h_rowMajorSignalMatrix, rows, columns);

    free(h_rowMajorSignalMatrix);

    if(transpose)
    {
        hipblasDestroy(cublasHandle);
        hipFree(d_transposedMatrix);
    }
}
b597ef6e4106b99aee4afd65d1a36f7ee16fda39.cu
/*
 * UtilityFunctions.cu
 *
 *  Created on: 17/03/2016
 *      Author: vincentvillani
 */

#include "../Header/UtilityFunctions.h"
#include "../Header/RFIMHelperFunctions.h"
#include "../Header/CudaUtilityFunctions.h"

#include <cublas_v2.h>

//Write a host signal matrix to a file
void Utility_WriteSignalMatrixToFile(const std::string filename, float* h_rowMajorSignalMatrix, uint64_t rows, uint64_t columns)
{
    FILE* signalFile = fopen(filename.c_str(), "w");

    if(signalFile == NULL)
    {
        fprintf(stderr, "WriteSignalMatrixToFile: failed to open %s file\n", filename.c_str());
        //exit(1);
    }

    for(uint32_t currentRow = 0; currentRow < rows; ++currentRow)
    {
        for(uint32_t currentCol = 0; currentCol < columns; ++currentCol)
        {
            //If last item in the column, write it without the " "
            if(currentCol == columns - 1)
                fprintf(signalFile, "%f", h_rowMajorSignalMatrix[currentRow * columns + currentCol] );
            else
                fprintf(signalFile, "%f ", h_rowMajorSignalMatrix[currentRow * columns + currentCol] );
        }

        //Print a newline for each row except the last one
        if(currentRow != rows - 1)
            fprintf(signalFile, "\n");
    }

    fclose(signalFile);
}

void Utility_DeviceWriteSignalMatrixToFile(const std::string filename, float* d_rowMajorSignalMatrix, uint64_t rows, uint64_t columns, bool transpose)
{
    uint32_t matrixByteSize = sizeof(float) * rows * columns;

    //Allocate a host buffer to copy the device matrix into
    float* h_rowMajorSignalMatrix = (float*)malloc(matrixByteSize);

    float* d_transposedMatrix = d_rowMajorSignalMatrix;

    cublasHandle_t cublasHandle;

    if(transpose)
    {
        cublasCreate_v2(&cublasHandle);
        cudaMalloc(&d_transposedMatrix, matrixByteSize);

        //Transpose the matrix
        Device_MatrixTranspose(&cublasHandle, d_rowMajorSignalMatrix, d_transposedMatrix, rows, columns);
    }

    CudaUtility_CopySignalToHost(d_transposedMatrix, &h_rowMajorSignalMatrix, sizeof(float) * rows * columns);

    //Call the host version of this function
    Utility_WriteSignalMatrixToFile(filename, h_rowMajorSignalMatrix, rows, columns);

    free(h_rowMajorSignalMatrix);

    if(transpose)
    {
        cublasDestroy(cublasHandle);
        cudaFree(d_transposedMatrix);
    }
}
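In both versions above, Device_MatrixTranspose is declared in RFIMHelperFunctions.h and its body is not shown. A common way to implement such an out-of-place transpose with cuBLAS is cublasSgeam with a transposed operand; the sketch below is illustrative only (the wrapper name and the assumption of a row-major rows x columns input buffer are mine, not the original code's):

#include <cublas_v2.h>

// Viewing the row-major rows x columns buffer as a columns x rows column-major
// matrix, C = 1 * A^T writes the transposed layout into d_out.
cublasStatus_t transpose_with_geam(cublasHandle_t handle,
                                   const float *d_in, float *d_out,
                                   int rows, int columns)
{
    const float alpha = 1.0f, beta = 0.0f;
    return cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N,
                       rows, columns,           // C is rows x columns, column-major
                       &alpha, d_in, columns,   // A is columns x rows, lda = columns
                       &beta,  d_out, rows,     // B is ignored since beta == 0
                       d_out, rows);
}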
09d75dabcdcf6aa7982d261266b228147da7f74a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "utilities.cuh" #include <stdio.h> #include <stdlib.h> __global__ void addKernel(int *c, int *a, int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { /* int a[5] = { 1, 2, 3, 4, 5 }; int b[5] = { 10, 20, 30, 40, 50 }; int c[5] = { 0 };*/ int* a, * b, * c; size_t size = 5 * sizeof(int); a = (int*)malloc(size); b = (int*)malloc(size); c = (int*)malloc(size); a[0] = 1; a[1] = 2; a[2] = 3; a[3] = 4; a[4] = 5; b[0] = 10; b[1] = 20; b[2] = 30; b[3] = 40; b[4] = 50; c[0] = 0; c[1] = 0; c[2] = 0; c[3] = 0; c[4] = 0; hipError_t cudaStatus; int* a_d = NULL; int* b_d = NULL; int* c_d = NULL; int** array_h[3] = { &a, &b, &c }; int** array_d[3] = { &a_d, &b_d, &c_d }; int** array[3] = { &a_d, &b_d, &c_d }; cudaStatus = arrayMalloc((void***)array, 3, size); /*hipMemcpy(a_d, a, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b, size, hipMemcpyHostToDevice); hipMemcpy(c_d, c, size, hipMemcpyHostToDevice);*/ /*for (int i = 0; i < 3; i++) { hipMemcpy(*array[i], array_h[i], size, hipMemcpyHostToDevice); if (array_d[i] == NULL) { fprintf(stderr, "array[%d] is NULL\n", i); } }*/ cudaStatus = arraycpyHtoD((void***)array_d, (void***)array_h, 3, size); hipLaunchKernelGGL(( addKernel) , dim3(1), dim3(5) , 0, 0, c_d, a_d, b_d); cudaStatus = onecpyDtoH((void*)c, (void*)c_d, size); printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. /* cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; }*/ return 0; } //// Helper function for using CUDA to add vectors in parallel. //hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) //{ // int *dev_a = 0; // int *dev_b = 0; // int *dev_c = 0; // hipError_t cudaStatus; // // Choose which GPU to run on, change this on a multi-GPU system. // cudaStatus = hipSetDevice(0); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; // } // // // Allocate GPU buffers for three vectors (two input, one output) . // cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMalloc failed!"); // goto Error; // } // // cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMalloc failed!"); // goto Error; // } // // cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMalloc failed!"); // goto Error; // } // // // Copy input vectors from host memory to GPU buffers. // cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // goto Error; // } // // cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // goto Error; // } // // // Launch a kernel on the GPU with one thread for each element. 
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // // // Check for any errors launching the kernel // cudaStatus = hipGetLastError(); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); // goto Error; // } // // // hipDeviceSynchronize waits for the kernel to finish, and returns // // any errors encountered during the launch. // cudaStatus = hipDeviceSynchronize(); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); // goto Error; // } // // // Copy output vector from GPU buffer to host memory. // cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); // if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipMemcpy failed!"); // goto Error; // } // //Error: // hipFree(dev_c); // hipFree(dev_a); // hipFree(dev_b); // // return cudaStatus; //}
09d75dabcdcf6aa7982d261266b228147da7f74a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "utilities.cuh" #include <stdio.h> #include <stdlib.h> __global__ void addKernel(int *c, int *a, int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { /* int a[5] = { 1, 2, 3, 4, 5 }; int b[5] = { 10, 20, 30, 40, 50 }; int c[5] = { 0 };*/ int* a, * b, * c; size_t size = 5 * sizeof(int); a = (int*)malloc(size); b = (int*)malloc(size); c = (int*)malloc(size); a[0] = 1; a[1] = 2; a[2] = 3; a[3] = 4; a[4] = 5; b[0] = 10; b[1] = 20; b[2] = 30; b[3] = 40; b[4] = 50; c[0] = 0; c[1] = 0; c[2] = 0; c[3] = 0; c[4] = 0; cudaError_t cudaStatus; int* a_d = NULL; int* b_d = NULL; int* c_d = NULL; int** array_h[3] = { &a, &b, &c }; int** array_d[3] = { &a_d, &b_d, &c_d }; int** array[3] = { &a_d, &b_d, &c_d }; cudaStatus = arrayMalloc((void***)array, 3, size); /*cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b, size, cudaMemcpyHostToDevice); cudaMemcpy(c_d, c, size, cudaMemcpyHostToDevice);*/ /*for (int i = 0; i < 3; i++) { cudaMemcpy(*array[i], array_h[i], size, cudaMemcpyHostToDevice); if (array_d[i] == NULL) { fprintf(stderr, "array[%d] is NULL\n", i); } }*/ cudaStatus = arraycpyHtoD((void***)array_d, (void***)array_h, 3, size); addKernel <<<1, 5 >>> (c_d, a_d, b_d); cudaStatus = onecpyDtoH((void*)c, (void*)c_d, size); printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. /* cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; }*/ return 0; } //// Helper function for using CUDA to add vectors in parallel. //cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) //{ // int *dev_a = 0; // int *dev_b = 0; // int *dev_c = 0; // cudaError_t cudaStatus; // // Choose which GPU to run on, change this on a multi-GPU system. // cudaStatus = cudaSetDevice(0); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; // } // // // Allocate GPU buffers for three vectors (two input, one output) . // cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMalloc failed!"); // goto Error; // } // // cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMalloc failed!"); // goto Error; // } // // cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMalloc failed!"); // goto Error; // } // // // Copy input vectors from host memory to GPU buffers. // cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // goto Error; // } // // cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // goto Error; // } // // // Launch a kernel on the GPU with one thread for each element. 
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // // // Check for any errors launching the kernel // cudaStatus = cudaGetLastError(); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); // goto Error; // } // // // cudaDeviceSynchronize waits for the kernel to finish, and returns // // any errors encountered during the launch. // cudaStatus = cudaDeviceSynchronize(); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); // goto Error; // } // // // Copy output vector from GPU buffer to host memory. // cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); // if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaMemcpy failed!"); // goto Error; // } // //Error: // cudaFree(dev_c); // cudaFree(dev_a); // cudaFree(dev_b); // // return cudaStatus; //}
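The pair above keeps all of the error handling from the CUDA sample template commented out. When re-enabling it, a small check macro keeps the call sites readable; this is a generic sketch, not part of the original sources:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report the failing call with file and line, then abort.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage after a launch such as addKernel<<<1, 5>>>(c_d, a_d, b_d);
//   CUDA_CHECK(cudaGetLastError());        // launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());   // asynchronous execution errors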
3705a916bfe8d77caecbb0e620d16c7e75ab1d25.hip
// !!! This is a file automatically generated by hipify!!! #include <unistd.h> #include "gtest/gtest.h" #include "gpu_helper/gpu_helper.cuh" #include "benchmark/benchmark.h" #include "cli_args.h" unsigned int sort_repeat_count; unsigned int sort_keys_prob_size_in_mb; unsigned int sort_keys_default_prob_size; unsigned int sort_pairs_default_prob_size; int main(int argc, char **argv) { // Make print buffer large enough hipDeviceSetLimit(hipLimitPrintfFifoSize, 1024*1024*4); /***************************************** *** GPU DEVICE OVERVIEW AND SELECTION *** *****************************************/ int deviceCount; hipGetDeviceCount(&deviceCount); int device; printf("\n --- DEVICES ---\n"); for (device = 0; device < deviceCount; ++device) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); printf(" -> device %d (%s) has %d SMs (shared memory: %zu B, registers: %6d) @%d MHz, memory-bus: %d bit @%d MHz (%d GB/s). \n", device, deviceProp.name, deviceProp.multiProcessorCount, deviceProp.sharedMemPerMultiprocessor, deviceProp.regsPerMultiprocessor, deviceProp.clockRate/1000, deviceProp.memoryBusWidth, deviceProp.memoryClockRate/1000, deviceProp.memoryBusWidth/8*(deviceProp.memoryClockRate)*2/1000000); } printf(" --- DEVICES ---\n"); int selected_device = 0; hipSetDevice(selected_device); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, selected_device); printf(" -> Selected device %d (%s) has %d SMs (shared memory: %zu B, registers: %6d) @%d MHz, memory-bus: %d bit @%d MHz (%d GB/s). \n", selected_device, deviceProp.name, deviceProp.multiProcessorCount, deviceProp.sharedMemPerMultiprocessor, deviceProp.regsPerMultiprocessor, deviceProp.clockRate/1000, deviceProp.memoryBusWidth, deviceProp.memoryClockRate/1000, deviceProp.memoryBusWidth/8*(deviceProp.memoryClockRate)*2/1000000); print_gpu_info(); /****************************** *** CLI ARGUMENTS PARSING *** ******************************/ opterr = 0; sort_keys_prob_size_in_mb = 0; sort_keys_default_prob_size = 200000; sort_pairs_default_prob_size = 100000; sort_repeat_count = 3; int c; while ((c = getopt(argc, argv, "r:k:p:s:")) != -1) { switch (c) { case 'r': sort_repeat_count = atoi(optarg); break; case 'k': sort_keys_default_prob_size = atoi(optarg); break; case 'p': sort_pairs_default_prob_size = atoi(optarg); break; case 's': sort_keys_prob_size_in_mb = atoi(optarg); break; default: break; } } /****************************** *** DEFAULT TEST SELECTION *** ******************************/ ::testing::InitGoogleTest(&argc, argv); int test_result = RUN_ALL_TESTS(); /****************************** *** BENCHMARK OVERVIEW *** ******************************/ if(::testing::UnitTest::GetInstance()->test_to_run_count()>0){ printf("\n --- PROFILE OVERVIEW ---\n"); BM_PRINT_ALL_PROFILES('\t'); printf(" --- PROFILE OVERVIEW ---\n"); } return test_result; }
3705a916bfe8d77caecbb0e620d16c7e75ab1d25.cu
#include <unistd.h> #include "gtest/gtest.h" #include "gpu_helper/gpu_helper.cuh" #include "benchmark/benchmark.h" #include "cli_args.h" unsigned int sort_repeat_count; unsigned int sort_keys_prob_size_in_mb; unsigned int sort_keys_default_prob_size; unsigned int sort_pairs_default_prob_size; int main(int argc, char **argv) { // Make print buffer large enough cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1024*1024*4); /***************************************** *** GPU DEVICE OVERVIEW AND SELECTION *** *****************************************/ int deviceCount; cudaGetDeviceCount(&deviceCount); int device; printf("\n --- DEVICES ---\n"); for (device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); printf(" -> device %d (%s) has %d SMs (shared memory: %zu B, registers: %6d) @%d MHz, memory-bus: %d bit @%d MHz (%d GB/s). \n", device, deviceProp.name, deviceProp.multiProcessorCount, deviceProp.sharedMemPerMultiprocessor, deviceProp.regsPerMultiprocessor, deviceProp.clockRate/1000, deviceProp.memoryBusWidth, deviceProp.memoryClockRate/1000, deviceProp.memoryBusWidth/8*(deviceProp.memoryClockRate)*2/1000000); } printf(" --- DEVICES ---\n"); int selected_device = 0; cudaSetDevice(selected_device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, selected_device); printf(" -> Selected device %d (%s) has %d SMs (shared memory: %zu B, registers: %6d) @%d MHz, memory-bus: %d bit @%d MHz (%d GB/s). \n", selected_device, deviceProp.name, deviceProp.multiProcessorCount, deviceProp.sharedMemPerMultiprocessor, deviceProp.regsPerMultiprocessor, deviceProp.clockRate/1000, deviceProp.memoryBusWidth, deviceProp.memoryClockRate/1000, deviceProp.memoryBusWidth/8*(deviceProp.memoryClockRate)*2/1000000); print_gpu_info(); /****************************** *** CLI ARGUMENTS PARSING *** ******************************/ opterr = 0; sort_keys_prob_size_in_mb = 0; sort_keys_default_prob_size = 200000; sort_pairs_default_prob_size = 100000; sort_repeat_count = 3; int c; while ((c = getopt(argc, argv, "r:k:p:s:")) != -1) { switch (c) { case 'r': sort_repeat_count = atoi(optarg); break; case 'k': sort_keys_default_prob_size = atoi(optarg); break; case 'p': sort_pairs_default_prob_size = atoi(optarg); break; case 's': sort_keys_prob_size_in_mb = atoi(optarg); break; default: break; } } /****************************** *** DEFAULT TEST SELECTION *** ******************************/ ::testing::InitGoogleTest(&argc, argv); int test_result = RUN_ALL_TESTS(); /****************************** *** BENCHMARK OVERVIEW *** ******************************/ if(::testing::UnitTest::GetInstance()->test_to_run_count()>0){ printf("\n --- PROFILE OVERVIEW ---\n"); BM_PRINT_ALL_PROFILES('\t'); printf(" --- PROFILE OVERVIEW ---\n"); } return test_result; }
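The device banner above derives the peak memory bandwidth from memoryClockRate (reported in kHz) and memoryBusWidth (in bits) with integer arithmetic. The same figure in double precision, as a small hedged sketch (the helper name is mine, not part of the original files):

#include <cuda_runtime.h>

// Theoretical peak bandwidth in GB/s: two transfers per clock (DDR),
// memoryClockRate in kHz, memoryBusWidth in bits.
static double peak_bandwidth_gb_s(int device)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return 2.0 * prop.memoryClockRate * 1e3 * (prop.memoryBusWidth / 8.0) / 1e9;
}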
be235cc403472629908825e1a19c63dc06c4593a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by drinkingcoder on 17-10-31. // #include "kernelGaussian.hpp" #include <cuda_by_example/book.h> using namespace cv; double KernelGaussian::gaussian_function(double x) { return 1/sqrt(2*M_PI)*exp(-pow(x,2)/2); } void KernelGaussian::compute_gaussian_kernel(double *gaussian_kernel) { double sum = 0; for(int i=0; i<KERNEL_SIZE; i++) for(int j=0; j<KERNEL_SIZE; j++) { gaussian_kernel[i*KERNEL_SIZE + j] = gaussian_function(fabs(KERNEL_SIZE/2 - i))*gaussian_function(fabs(KERNEL_SIZE/2 - j)); sum += gaussian_kernel[i*KERNEL_SIZE + j]; } double tot = 0; for(int i=0; i<KERNEL_SIZE; i++) for(int j=0; j<KERNEL_SIZE; j++) { gaussian_kernel[i * KERNEL_SIZE + j] /= sum; tot += gaussian_kernel[i * KERNEL_SIZE + j]; } } __global__ static void kernel(unsigned char *result, unsigned char *ptr, double *gaussian_kernel) { int x = blockIdx.x*blockDim.x+threadIdx.x; if( x >= IMAGESIZE_WIDTH-KERNEL_SIZE ) return; int y = blockIdx.y*blockDim.y+threadIdx.y; if( y >= IMAGESIZE_HEIGHT-KERNEL_SIZE ) return; int offset = x+y*IMAGESIZE_WIDTH; for(int channel = 0;channel<3;channel++) { double tmp_result = 0; for (int i = 0; i < KERNEL_SIZE; i++) for (int j = 0; j < KERNEL_SIZE; j++) tmp_result += ptr[channel + (offset + i * IMAGESIZE_WIDTH + j)*3] * gaussian_kernel[i * KERNEL_SIZE + j]; result[offset*3 + channel ] = (unsigned char) (tmp_result); } } void KernelGaussian::preparation() { m_gaussianKernel = new double[KERNEL_SIZE*KERNEL_SIZE]; compute_gaussian_kernel(m_gaussianKernel); HANDLE_ERROR( hipMalloc((void**)&m_devGaussianKernel, KERNEL_SIZE*KERNEL_SIZE*sizeof(double)) ); HANDLE_ERROR( hipMemcpy(m_devGaussianKernel,m_gaussianKernel,KERNEL_SIZE*KERNEL_SIZE*sizeof(double),hipMemcpyHostToDevice) ); Mat host_input_image = imread("Image.jpg"); host_input_image.convertTo(host_input_image,CV_8UC3); resize(host_input_image,host_input_image,cv::Size(IMAGESIZE_WIDTH,IMAGESIZE_HEIGHT)); HANDLE_ERROR( hipMalloc((void**)&m_devInputBitmap, 3*IMAGESIZE_HEIGHT*IMAGESIZE_WIDTH) ); HANDLE_ERROR( hipMemcpy(m_devInputBitmap, host_input_image.data, 3*IMAGESIZE_HEIGHT*IMAGESIZE_WIDTH, hipMemcpyHostToDevice) ); HANDLE_ERROR( hipMalloc((void**)&m_devResultBitmap, 3*host_input_image.rows*host_input_image.cols) ); } void KernelGaussian::awakeKernel() { #define BLOCKSIZE 32 #define GRIDSIZEX (IMAGESIZE_WIDTH/BLOCKSIZE+1) #define GRIDSIZEY (IMAGESIZE_HEIGHT/BLOCKSIZE+1) dim3 gridDim(GRIDSIZEX,GRIDSIZEY); dim3 blockDim(BLOCKSIZE,BLOCKSIZE); hipLaunchKernelGGL(( kernel), dim3(gridDim),dim3(blockDim), 0, 0, m_devResultBitmap,m_devInputBitmap,m_devGaussianKernel); } void KernelGaussian::postProcessing(){ HANDLE_ERROR( hipMemcpy(m_resultImage.data,m_devResultBitmap,3*IMAGESIZE_WIDTH*IMAGESIZE_HEIGHT, hipMemcpyDeviceToHost) ); // imwrite("Kernel9x9.jpg",m_resultImage); imshow("Image after blurred",m_resultImage); waitKey(); HANDLE_ERROR( hipFree(m_devInputBitmap) ); HANDLE_ERROR( hipFree(m_devResultBitmap) ); HANDLE_ERROR( hipFree(m_devGaussianKernel) ); }
be235cc403472629908825e1a19c63dc06c4593a.cu
// // Created by drinkingcoder on 17-10-31. // #include "kernelGaussian.hpp" #include <cuda_by_example/book.h> using namespace cv; double KernelGaussian::gaussian_function(double x) { return 1/sqrt(2*M_PI)*exp(-pow(x,2)/2); } void KernelGaussian::compute_gaussian_kernel(double *gaussian_kernel) { double sum = 0; for(int i=0; i<KERNEL_SIZE; i++) for(int j=0; j<KERNEL_SIZE; j++) { gaussian_kernel[i*KERNEL_SIZE + j] = gaussian_function(fabs(KERNEL_SIZE/2 - i))*gaussian_function(fabs(KERNEL_SIZE/2 - j)); sum += gaussian_kernel[i*KERNEL_SIZE + j]; } double tot = 0; for(int i=0; i<KERNEL_SIZE; i++) for(int j=0; j<KERNEL_SIZE; j++) { gaussian_kernel[i * KERNEL_SIZE + j] /= sum; tot += gaussian_kernel[i * KERNEL_SIZE + j]; } } __global__ static void kernel(unsigned char *result, unsigned char *ptr, double *gaussian_kernel) { int x = blockIdx.x*blockDim.x+threadIdx.x; if( x >= IMAGESIZE_WIDTH-KERNEL_SIZE ) return; int y = blockIdx.y*blockDim.y+threadIdx.y; if( y >= IMAGESIZE_HEIGHT-KERNEL_SIZE ) return; int offset = x+y*IMAGESIZE_WIDTH; for(int channel = 0;channel<3;channel++) { double tmp_result = 0; for (int i = 0; i < KERNEL_SIZE; i++) for (int j = 0; j < KERNEL_SIZE; j++) tmp_result += ptr[channel + (offset + i * IMAGESIZE_WIDTH + j)*3] * gaussian_kernel[i * KERNEL_SIZE + j]; result[offset*3 + channel ] = (unsigned char) (tmp_result); } } void KernelGaussian::preparation() { m_gaussianKernel = new double[KERNEL_SIZE*KERNEL_SIZE]; compute_gaussian_kernel(m_gaussianKernel); HANDLE_ERROR( cudaMalloc((void**)&m_devGaussianKernel, KERNEL_SIZE*KERNEL_SIZE*sizeof(double)) ); HANDLE_ERROR( cudaMemcpy(m_devGaussianKernel,m_gaussianKernel,KERNEL_SIZE*KERNEL_SIZE*sizeof(double),cudaMemcpyHostToDevice) ); Mat host_input_image = imread("Image.jpg"); host_input_image.convertTo(host_input_image,CV_8UC3); resize(host_input_image,host_input_image,cv::Size(IMAGESIZE_WIDTH,IMAGESIZE_HEIGHT)); HANDLE_ERROR( cudaMalloc((void**)&m_devInputBitmap, 3*IMAGESIZE_HEIGHT*IMAGESIZE_WIDTH) ); HANDLE_ERROR( cudaMemcpy(m_devInputBitmap, host_input_image.data, 3*IMAGESIZE_HEIGHT*IMAGESIZE_WIDTH, cudaMemcpyHostToDevice) ); HANDLE_ERROR( cudaMalloc((void**)&m_devResultBitmap, 3*host_input_image.rows*host_input_image.cols) ); } void KernelGaussian::awakeKernel() { #define BLOCKSIZE 32 #define GRIDSIZEX (IMAGESIZE_WIDTH/BLOCKSIZE+1) #define GRIDSIZEY (IMAGESIZE_HEIGHT/BLOCKSIZE+1) dim3 gridDim(GRIDSIZEX,GRIDSIZEY); dim3 blockDim(BLOCKSIZE,BLOCKSIZE); kernel<<<gridDim,blockDim>>>(m_devResultBitmap,m_devInputBitmap,m_devGaussianKernel); } void KernelGaussian::postProcessing(){ HANDLE_ERROR( cudaMemcpy(m_resultImage.data,m_devResultBitmap,3*IMAGESIZE_WIDTH*IMAGESIZE_HEIGHT, cudaMemcpyDeviceToHost) ); // imwrite("Kernel9x9.jpg",m_resultImage); imshow("Image after blurred",m_resultImage); waitKey(); HANDLE_ERROR( cudaFree(m_devInputBitmap) ); HANDLE_ERROR( cudaFree(m_devResultBitmap) ); HANDLE_ERROR( cudaFree(m_devGaussianKernel) ); }
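compute_gaussian_kernel above builds the 2D weights as an outer product of two 1D Gaussians and then normalizes them, so the filter is separable: a horizontal pass followed by a vertical pass costs 2*KERNEL_SIZE reads per pixel instead of KERNEL_SIZE*KERNEL_SIZE. A host-side sketch of the matching normalized 1D weights (the two 1D device passes are not shown; this is illustrative, not part of the original file):

#include <cmath>

void compute_gaussian_kernel_1d(double *w, int ksize)
{
    double sum = 0;
    for (int i = 0; i < ksize; ++i) {
        double x = fabs(ksize / 2 - i);                   // same centering as the 2D version
        w[i] = 1.0 / sqrt(2.0 * M_PI) * exp(-x * x / 2.0);
        sum += w[i];
    }
    for (int i = 0; i < ksize; ++i)
        w[i] /= sum;                                      // normalize to unit sum
}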
6e94d1b9d92c6c6e721805eca2375f8b2fed8f0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include "cuda_reduction.h" __global__ void reduction_sum(int N, float* X, float* reducted_vec) { extern __shared__ float reduction_cache[] ; //thread ID on each row of blocks int tid = blockDim.x * blockIdx.x + threadIdx.x; int cache_i = threadIdx.x; /* This UNROLLS the elements of x, "outside" the grid's index range. In the case of N=600, threadsPerBlock=256 and 2 blocks in total, we have 600-256*2=88 additions done in parallel, before the reduction of the 512 threads. incase the index-range > N, the reduction scheme will simply add some zeros to the vector. This allows as to oversubscribe in terms of threads and blocks. */ int offset = N*blockIdx.y; float temp=0; while (tid < N) { temp += X[tid+offset]; tid += blockDim.x * gridDim.x; } /* Load x-data into local shared memory. As mentioned before, some entries are small sums of x's outside the grid's range */ reduction_cache[cache_i] = temp; __syncthreads(); // Begin the reduction per shared-memory-block for(int i=blockDim.x/2; i>0; i>>=1) { if(cache_i < i) reduction_cache[cache_i] += reduction_cache[cache_i+i]; __syncthreads(); } // Unroll Last warp /*if(cache_i>32) { reduction_cache[cache_i] += reduction_cache[cache_i+32]; reduction_cache[cache_i] += reduction_cache[cache_i+16]; reduction_cache[cache_i] += reduction_cache[cache_i+8]; reduction_cache[cache_i] += reduction_cache[cache_i+4]; reduction_cache[cache_i] += reduction_cache[cache_i+2]; reduction_cache[cache_i] += reduction_cache[cache_i+1]; }*/ // Final Sum is stored in global array. if(cache_i==0) reducted_vec[blockIdx.y*gridDim.x + blockIdx.x] = reduction_cache[0]; } void init_reduction_cache(int rowLength, int rowNum, int threads_num, /*ouit*/ ReductionCache* rc) { rc->blockDim.x = threads_num; rc->blockDim.y = 1; rc->blockDim.z = 1; int blocks_num = ceil(rowLength/threads_num); if(blocks_num==0) blocks_num=1; rc->blocksNum = blocks_num; rc->gridDim.x = blocks_num; rc->gridDim.y = rowNum; // One row of block for each matrix row rc->gridDim.z = 1; rc->rowNum = rowNum; rc->reduced_vec_length = rowNum*blocks_num; // ronNum * (number of blocks per row) rc->cache_size = rowNum*threads_num*sizeof(float); if(rc->cache_size > 1024*16) // cache > 16 KB. CUCA 1.x allows max sm 16 per MP printf("[WARNING]:\t[INIT_REDUCTION_CACHE]:\t \ Shared Memory size too large: %lu\n",\ rc->cache_size); if(blocks_num>1) hipMalloc((void**) &(rc->d_reduced_vec), rc->reduced_vec_length*sizeof(float)); // This is not needed in this case. As reduction cache, d_sum can also be used. hipMalloc((void**) &(rc->d_sum), rowNum*sizeof(float)); } void delete_reduction_cache(ReductionCache* reductionCache) { if(reductionCache->blocksNum>1) hipFree(reductionCache->d_reduced_vec); hipFree(reductionCache->d_sum); } void WR_reduction(int N, float* d_A, /*out*/ ReductionCache* rc ) { if(rc->blocksNum == 1) { // We need only one reduction call! hipLaunchKernelGGL(( reduction_sum) , dim3(rc->gridDim), dim3(rc->blockDim), rc->cache_size, 0, N, d_A, rc->d_sum); //no need for the d_reduction cache } else { // We need multiple reduction calls! hipLaunchKernelGGL(( reduction_sum) , dim3(rc->gridDim), dim3(rc->blockDim), rc->cache_size, 0, N, d_A, rc->d_reduced_vec); /* Reduct the final reduction vector! */ /* Ideally we would like threads_num==length(reduced_vec)/numRow. However threads_num2 must be a power of 2. 
Thus: */ int threads_num2 = exp2f(floor(log2f(rc->reduced_vec_length/rc->rowNum))); if(threads_num2>512) threads_num2=512; //printf("THREADS: %d RED_VEC %d\n", threads_num2, rc->reduced_vec_length/rc->rowNum ); dim3 gridDim2(1,rc->rowNum,1); dim3 blockDim2(threads_num2,1,1); hipLaunchKernelGGL(( reduction_sum), dim3(gridDim2), dim3(blockDim2), threads_num2*sizeof(float), 0, \ rc->gridDim.x, rc->d_reduced_vec, rc->d_sum); // // WARNING: launching with original thread_num might be too much. // SOLUTION: Find power-of-2 nearest to block_num } }
6e94d1b9d92c6c6e721805eca2375f8b2fed8f0a.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "cuda_reduction.h" __global__ void reduction_sum(int N, float* X, float* reducted_vec) { extern __shared__ float reduction_cache[] ; //thread ID on each row of blocks int tid = blockDim.x * blockIdx.x + threadIdx.x; int cache_i = threadIdx.x; /* This UNROLLS the elements of x, "outside" the grid's index range. In the case of N=600, threadsPerBlock=256 and 2 blocks in total, we have 600-256*2=88 additions done in parallel, before the reduction of the 512 threads. incase the index-range > N, the reduction scheme will simply add some zeros to the vector. This allows as to oversubscribe in terms of threads and blocks. */ int offset = N*blockIdx.y; float temp=0; while (tid < N) { temp += X[tid+offset]; tid += blockDim.x * gridDim.x; } /* Load x-data into local shared memory. As mentioned before, some entries are small sums of x's outside the grid's range */ reduction_cache[cache_i] = temp; __syncthreads(); // Begin the reduction per shared-memory-block for(int i=blockDim.x/2; i>0; i>>=1) { if(cache_i < i) reduction_cache[cache_i] += reduction_cache[cache_i+i]; __syncthreads(); } // Unroll Last warp /*if(cache_i>32) { reduction_cache[cache_i] += reduction_cache[cache_i+32]; reduction_cache[cache_i] += reduction_cache[cache_i+16]; reduction_cache[cache_i] += reduction_cache[cache_i+8]; reduction_cache[cache_i] += reduction_cache[cache_i+4]; reduction_cache[cache_i] += reduction_cache[cache_i+2]; reduction_cache[cache_i] += reduction_cache[cache_i+1]; }*/ // Final Sum is stored in global array. if(cache_i==0) reducted_vec[blockIdx.y*gridDim.x + blockIdx.x] = reduction_cache[0]; } void init_reduction_cache(int rowLength, int rowNum, int threads_num, /*ouit*/ ReductionCache* rc) { rc->blockDim.x = threads_num; rc->blockDim.y = 1; rc->blockDim.z = 1; int blocks_num = ceil(rowLength/threads_num); if(blocks_num==0) blocks_num=1; rc->blocksNum = blocks_num; rc->gridDim.x = blocks_num; rc->gridDim.y = rowNum; // One row of block for each matrix row rc->gridDim.z = 1; rc->rowNum = rowNum; rc->reduced_vec_length = rowNum*blocks_num; // ronNum * (number of blocks per row) rc->cache_size = rowNum*threads_num*sizeof(float); if(rc->cache_size > 1024*16) // cache > 16 KB. CUCA 1.x allows max sm 16 per MP printf("[WARNING]:\t[INIT_REDUCTION_CACHE]:\t \ Shared Memory size too large: %lu\n",\ rc->cache_size); if(blocks_num>1) cudaMalloc((void**) &(rc->d_reduced_vec), rc->reduced_vec_length*sizeof(float)); // This is not needed in this case. As reduction cache, d_sum can also be used. cudaMalloc((void**) &(rc->d_sum), rowNum*sizeof(float)); } void delete_reduction_cache(ReductionCache* reductionCache) { if(reductionCache->blocksNum>1) cudaFree(reductionCache->d_reduced_vec); cudaFree(reductionCache->d_sum); } void WR_reduction(int N, float* d_A, /*out*/ ReductionCache* rc ) { if(rc->blocksNum == 1) { // We need only one reduction call! reduction_sum <<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N, d_A, rc->d_sum); //no need for the d_reduction cache } else { // We need multiple reduction calls! reduction_sum <<<rc->gridDim, rc->blockDim, rc->cache_size>>>(N, d_A, rc->d_reduced_vec); /* Reduct the final reduction vector! */ /* Ideally we would like threads_num==length(reduced_vec)/numRow. However threads_num2 must be a power of 2. 
Thus: */ int threads_num2 = exp2f(floor(log2f(rc->reduced_vec_length/rc->rowNum))); if(threads_num2>512) threads_num2=512; //printf("THREADS: %d RED_VEC %d\n", threads_num2, rc->reduced_vec_length/rc->rowNum ); dim3 gridDim2(1,rc->rowNum,1); dim3 blockDim2(threads_num2,1,1); reduction_sum<<<gridDim2, blockDim2, threads_num2*sizeof(float)>>>\ (rc->gridDim.x, rc->d_reduced_vec, rc->d_sum); // // WARNING: launching with original thread_num might be too much. // SOLUTION: Find power-of-2 nearest to block_num } }
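In both reduction files the "unroll last warp" block is commented out, and as written it guards with cache_i>32 where cache_i<32 was presumably intended (it would also need the shared array to be volatile). On hardware with warp shuffles, the final 32-to-1 step can instead be done without shared memory or __syncthreads(); a hedged CUDA sketch, not part of the original files:

__device__ float warp_reduce_sum(float v)
{
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffff, v, offset);
    return v;
}

// Inside reduction_sum, once the shared-memory loop has reduced the block to
// 32 partial sums, threads 0..31 could call warp_reduce_sum(reduction_cache[cache_i])
// and thread 0 alone writes the block result to reducted_vec.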
8510709a1bd849db5f0d3e86571c402991f86e27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "img_process.hpp" #include <fstream> #include <stdio.h> #include <opencv2/opencv.hpp> //#define __OUTPUT_PIX__ #define BLOCK_SIZE 32 __constant__ __device__ float lTable_const[1064]; __constant__ __device__ float mr_const[3]; __constant__ __device__ float mg_const[3]; __constant__ __device__ float mb_const[3]; #define FATAL(msg, ...) \ do {\ fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\ exit(-1);\ } while(0) img_process::img_process() { } img_process::~img_process() { } /*****************************************************************************/ /* CUDA KERNELS */ /*****************************************************************************/ __global__ void convert_to_luv_gpu_kernel(unsigned char *in_img, float *out_img, int cols, int rows, bool use_rgb) { float r, g, b, l, u, v, x, y, z, lt; unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < cols) && (y_pos < rows)) { unsigned int pos = (y_pos * cols) + x_pos; if (use_rgb) { r = (float)in_img[(3 * pos)]; g = (float)in_img[(3 * pos) + 1]; b = (float)in_img[(3 * pos) + 2]; } else { b = (float)in_img[(3 * pos)]; g = (float)in_img[(3 * pos) + 1]; r = (float)in_img[(3 * pos) + 2]; } x = (mr_const[0] * r) + (mg_const[0] * g) + (mb_const[0] * b); y = (mr_const[1] * r) + (mg_const[1] * g) + (mb_const[1] * b); z = (mr_const[2] * r) + (mg_const[2] * g) + (mb_const[2] * b); float maxi = 1.0f / 270; float minu = -88.0f * maxi; float minv = -134.0f * maxi; float un = 0.197833f; float vn = 0.468331f; lt = lTable_const[static_cast<int>((y*1024))]; l = lt; z = 1/(x + (15 * y) + (3 * z) + (float)1e-35); u = lt * (13 * 4 * x * z - 13 * un) - minu; v = lt * (13 * 9 * y * z - 13 * vn) - minv; out_img[(3 * pos)] = l; out_img[(3 * pos) + 1] = u; out_img[(3 * pos) + 2] = v; } } __global__ void trianguler_convolution_gpu_kernel(float *dev_I, float *dev_O, float *T0, float *T1, float *T2, int wd, int ht, float nrm, float p) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < wd) && (y_pos < ht)) { float *It0, *It1, *It2, *Im0, *Im1, *Im2, *Ib0, *Ib1, *Ib2; float *Ot0, *Ot1, *Ot2; float *T00, *T10, *T20; It0 = Im0 = Ib0 = dev_I + (y_pos * wd) + (0 * ht * wd); It1 = Im1 = Ib1 = dev_I + (y_pos * wd) + (1 * ht * wd); It2 = Im2 = Ib2 = dev_I + (y_pos * wd) + (2 * ht * wd); Ot0 = dev_O + (y_pos * wd) + (0 * ht * wd); Ot1 = dev_O + (y_pos * wd) + (1 * ht * wd); Ot2 = dev_O + (y_pos * wd) + (2 * ht * wd); T00 = T0 + (y_pos * wd); T10 = T1 + (y_pos * wd); T20 = T2 + (y_pos * wd); if(y_pos > 0) { /// not the first row, let It point to previous row It0 -= wd; It1 -= wd; It2 -= wd; } if(y_pos < ht - 1) { /// not the last row, let Ib point to next row Ib0 += wd; Ib1 += wd; Ib2 += wd; } T00[x_pos] = nrm * (It0[x_pos] + (p * Im0[x_pos]) + Ib0[x_pos]); T10[x_pos] = nrm * (It1[x_pos] + (p * Im1[x_pos]) + Ib1[x_pos]); T20[x_pos] = nrm * (It2[x_pos] + (p * Im2[x_pos]) + Ib2[x_pos]); __syncthreads(); if (x_pos == 0) { Ot0[x_pos] = ((1 + p) * T00[x_pos]) + T00[x_pos + 1]; Ot1[x_pos] = ((1 + p) * T10[x_pos]) + T10[x_pos + 1]; Ot2[x_pos] = ((1 + p) * T20[x_pos]) + T20[x_pos + 1]; } else if (x_pos == wd - 1) { Ot0[x_pos] = T00[x_pos - 1] + ((1 + p) * T00[x_pos]); Ot1[x_pos] = T10[x_pos - 1] + ((1 + p) * T10[x_pos]); Ot2[x_pos] = T20[x_pos - 1] + ((1 + p) * T20[x_pos]); } else { Ot0[x_pos] = 
T00[x_pos - 1] + (p * T00[x_pos]) + T00[x_pos + 1]; Ot1[x_pos] = T10[x_pos - 1] + (p * T10[x_pos]) + T10[x_pos + 1]; Ot2[x_pos] = T20[x_pos - 1] + (p * T20[x_pos]) + T20[x_pos + 1]; } __syncthreads(); } } __global__ void lin2lin_resmpl_good_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int *yas_const, int *ybs_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int ya, yb; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + (0 * org_ht * org_wd); float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + (1 * org_ht * org_wd); float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + (2 * org_ht * org_wd); float *B2 = dev_out_img + (2 * dst_ht * dst_wd); if (org_ht == dst_ht && org_wd == dst_wd) { int out_img_idx = y_pos + (dst_wd * x_pos); B0[out_img_idx] = A0[out_img_idx * n_channels]; B1[out_img_idx] = A1[out_img_idx * n_channels]; B2[out_img_idx] = A2[out_img_idx * n_channels]; return; } int y1 = 0; if (org_ht == 2 * dst_ht) { y1 += 2 * y_pos; } else if (org_ht == 3 * dst_ht) { y1 += 3 * y_pos; } else if (org_ht == 4 * dst_ht) { y1 += 4 * y_pos; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd); A01 = A00 + (org_wd); A02 = A01 + (org_wd); A03 = A02 + (org_wd); A10 = A1 + (ya * org_wd); A11 = A00 + (org_wd); A12 = A01 + (org_wd); A13 = A02 + (org_wd); A20 = A2 + (ya * org_wd); A21 = A00 + (org_wd); A22 = A01 + (org_wd); A23 = A02 + (org_wd); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); // resample along y direction if (org_ht == 2 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos]; } else if (org_ht == 3 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos] + A02[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos] + A12[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos] + A22[x_pos]; } else if (org_ht == 4 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos] + A02[x_pos] + A03[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos] + A12[x_pos] + A13[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos] + A22[x_pos] + A23[x_pos]; } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); // resample along x direction (B -> C) if (org_wd == 2 * dst_wd) { B00[x_pos]= (dev_C0_tmp[2 * x_pos] + dev_C0_tmp[(2 * x_pos) + 1]) * (r / 2); B10[x_pos]= (dev_C1_tmp[2 * x_pos] + dev_C1_tmp[(2 * x_pos) + 1]) * (r / 2); B20[x_pos]= (dev_C2_tmp[2 * x_pos] + dev_C2_tmp[(2 * x_pos) + 1]) * (r / 2); } else if (org_wd == 3 * dst_wd) { B00[x_pos] = (dev_C0_tmp[3 * x_pos] + dev_C0_tmp[(3 * x_pos) + 1] + dev_C0_tmp[(3 * x_pos) + 2]) * (r / 3); B10[x_pos] = (dev_C1_tmp[3 * x_pos] + dev_C1_tmp[(3 * x_pos) + 1] + dev_C1_tmp[(3 * x_pos) + 2]) * (r / 3); B20[x_pos] = (dev_C2_tmp[3 * x_pos] + dev_C2_tmp[(3 * x_pos) + 1] + dev_C2_tmp[(3 * x_pos) + 2]) * (r / 3); } else if (org_wd == 4 * dst_wd) { B00[x_pos] = (dev_C0_tmp[4 * x_pos] + dev_C0_tmp[(4 * x_pos) + 1] + dev_C0_tmp[(4 * x_pos) + 2] + dev_C0_tmp[(4 * x_pos) + 3]) * (r / 4); B10[x_pos] = (dev_C1_tmp[4 * x_pos] + dev_C1_tmp[(4 * x_pos) + 1] + dev_C1_tmp[(4 * x_pos) + 2] + dev_C1_tmp[(4 * 
x_pos) + 3]) * (r / 4); B20[x_pos] = (dev_C2_tmp[4 * x_pos] + dev_C2_tmp[(4 * x_pos) + 1] + dev_C2_tmp[(4 * x_pos) + 2] + dev_C2_tmp[(4 * x_pos) + 3]) * (r / 4); } __syncthreads(); } } __global__ void lin2lin_resmpl_messy_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int hn, int wn, int xbd0, int xbd1, int ybd0, int ybd1, int *xas_const, int *xbs_const, float *xwts_const, int *yas_const, int *ybs_const, float *ywts_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int xa, ya, yb; float wt, wt1; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + 0; float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + 1; float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + 2; float *B2 = dev_out_img + (2 * dst_ht * dst_wd); int y1 = 0; if (org_ht > dst_ht) { int m = 1; for (int iter = 0; iter < y_pos; iter++) { while (y1 + m < hn && yb == ybs_const[y1 + m]) m++; y1 += m; } wt = ywts_const[y1]; wt1 = 1 - wt; } else { y1 = y_pos; wt = ywts_const[y1]; wt1 = 1 - wt; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd * n_channels); A01 = A00 + (org_wd * n_channels); A02 = A01 + (org_wd * n_channels); A03 = A02 + (org_wd * n_channels); A10 = A1 + (ya * org_wd * n_channels); A11 = A00 + (org_wd * n_channels); A12 = A01 + (org_wd * n_channels); A13 = A02 + (org_wd * n_channels); A20 = A2 + (ya * org_wd * n_channels); A21 = A00 + (org_wd * n_channels); A22 = A01 + (org_wd * n_channels); A23 = A02 + (org_wd * n_channels); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); int x = 0; if (org_wd < x_pos) { // resample along y direction if (org_ht > dst_ht) { int m = 1; while ((y1 + m < hn) && (yb == ybs_const[y1 + m])) m++; if (m == 1) { dev_C0_tmp[x_pos] = A00[x_pos] * ywts_const[y1]; dev_C1_tmp[x_pos] = A10[x_pos] * ywts_const[y1]; dev_C2_tmp[x_pos] = A20[x_pos] * ywts_const[y1]; } else if (m == 2) { dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) + (A01[x_pos] * ywts_const[y1 + 1]); dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) + (A11[x_pos] * ywts_const[y1 + 1]); dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) + (A21[x_pos] * ywts_const[y1 + 1]); } else if (m == 3) { dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) + (A01[x_pos] * ywts_const[y1 + 1]) + (A02[x_pos] * ywts_const[y1 + 2]); dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) + (A11[x_pos] * ywts_const[y1 + 1]) + (A12[x_pos] * ywts_const[y1 + 2]); dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) + (A21[x_pos] * ywts_const[y1 + 1]) + (A22[x_pos] * ywts_const[y1 + 2]); } else if (m >= 4) { dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) + (A01[x_pos] * ywts_const[y1 + 1]) + (A02[x_pos] * ywts_const[y1 + 2]) + (A03[x_pos] * ywts_const[y1 + 3]); dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) + (A11[x_pos] * ywts_const[y1 + 1]) + (A12[x_pos] * ywts_const[y1 + 2]) + (A13[x_pos] * ywts_const[y1 + 3]); dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) + (A21[x_pos] * ywts_const[y1 + 1]) + (A22[x_pos] * ywts_const[y1 + 2]) + (A23[x_pos] * ywts_const[y1 + 3]); } for (int y0 = 4; y0 < m; y0++) { A01 = A00 + (y0 * org_wd); A11 = A10 + (y0 * org_wd); A11 = A10 + (y0 * org_wd); 
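/* Note: the statement "A11 = A10 + (y0 * org_wd);" appears twice immediately above; the second
   occurrence looks like a copy-paste slip and was presumably meant to be
   "A21 = A20 + (y0 * org_wd);". As written, A21 is never advanced inside this loop, so the
   dev_C2_tmp accumulation below keeps reading the same row on every iteration. The earlier
   A11/A12/A13 and A21/A22/A23 assignments in this kernel likewise point back at A00/A01/A02
   rather than at their own channel's row pointers, which appears to be the same slip. */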
wt1 = ywts_const[y1 + y0]; dev_C0_tmp[x_pos] = dev_C0_tmp[x_pos] + (A01[x_pos] * wt1); dev_C1_tmp[x_pos] = dev_C1_tmp[x_pos] + (A11[x_pos] * wt1); dev_C2_tmp[x_pos] = dev_C2_tmp[x_pos] + (A21[x_pos] * wt1); } } else { bool yBd = y_pos < ybd0 || y_pos >= dst_ht - ybd1; if (yBd) { dev_C0_tmp[x_pos] = A00[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos]; } else { dev_C0_tmp[x_pos] = (A00[x_pos] * wt) + (A01[x_pos] * wt1); dev_C1_tmp[x_pos] = (A10[x_pos] * wt) + (A11[x_pos] * wt1); dev_C2_tmp[x_pos] = (A20[x_pos] * wt) + (A21[x_pos] * wt1); } } } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); // resample along x direction (B -> C) if (x_pos < dst_wd) { if (org_wd > dst_wd) { if (xbd0 == 2) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); } else if (xbd0 == 3) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); } else if (xbd0 == 4) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C0_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C1_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C2_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); } else if (xbd0 > 4) { for(x = 0; x < wn; x++) { B00[xbs_const[x]] += dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[xbs_const[x]] += dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[xbs_const[x]] += dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } else { for (x = 0; x < xbd0; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } for (; x < dst_wd - xbd1; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x] + dev_C0_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x] + dev_C1_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x] + dev_C2_tmp[xas_const[x] + 1] * (r - xwts_const[x]); } for (; x < dst_wd; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } __syncthreads(); } } __global__ void int2lin_resmpl_good_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float 
*dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int *yas_const, int *ybs_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int ya, yb; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + 0; float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + 1; float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + 2; float *B2 = dev_out_img + (2 * dst_ht * dst_wd); if (org_ht == dst_ht && org_wd == dst_wd) { int out_img_idx = y_pos + (dst_wd * x_pos); B0[out_img_idx] = A0[out_img_idx * n_channels]; B1[out_img_idx] = A1[out_img_idx * n_channels]; B2[out_img_idx] = A2[out_img_idx * n_channels]; return; } int y1 = 0; if (org_ht == 2 * dst_ht) { y1 += 2 * y_pos; } else if (org_ht == 3 * dst_ht) { y1 += 3 * y_pos; } else if (org_ht == 4 * dst_ht) { y1 += 4 * y_pos; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd * n_channels); A01 = A00 + (org_wd * n_channels); A02 = A01 + (org_wd * n_channels); A03 = A02 + (org_wd * n_channels); A10 = A1 + (ya * org_wd * n_channels); A11 = A00 + (org_wd * n_channels); A12 = A01 + (org_wd * n_channels); A13 = A02 + (org_wd * n_channels); A20 = A2 + (ya * org_wd * n_channels); A21 = A00 + (org_wd * n_channels); A22 = A01 + (org_wd * n_channels); A23 = A02 + (org_wd * n_channels); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); // resample along y direction if (org_ht == 2 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] + A01[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] + A11[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] + A21[x_pos * n_channels]; } else if (org_ht == 3 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] + A01[x_pos * n_channels] + A02[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] + A11[x_pos * n_channels] + A12[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] + A21[x_pos * n_channels] + A22[x_pos * n_channels]; } else if (org_ht == 4 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] + A01[x_pos * n_channels] + A02[x_pos * n_channels] + A03[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] + A11[x_pos * n_channels] + A12[x_pos * n_channels] + A13[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] + A21[x_pos * n_channels] + A22[x_pos * n_channels] + A23[x_pos * n_channels]; } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); // resample along x direction (B -> C) if (org_wd == 2 * dst_wd) { B00[x_pos]= (dev_C0_tmp[2 * x_pos] + dev_C0_tmp[(2 * x_pos) + 1]) * (r / 2); B10[x_pos]= (dev_C1_tmp[2 * x_pos] + dev_C1_tmp[(2 * x_pos) + 1]) * (r / 2); B20[x_pos]= (dev_C2_tmp[2 * x_pos] + dev_C2_tmp[(2 * x_pos) + 1]) * (r / 2); } else if (org_wd == 3 * dst_wd) { B00[x_pos] = (dev_C0_tmp[3 * x_pos] + dev_C0_tmp[(3 * x_pos) + 1] + dev_C0_tmp[(3 * x_pos) + 2]) * (r / 3); B10[x_pos] = (dev_C1_tmp[3 * x_pos] + dev_C1_tmp[(3 * x_pos) + 1] + dev_C1_tmp[(3 * x_pos) + 2]) * (r / 3); B20[x_pos] = (dev_C2_tmp[3 * x_pos] + dev_C2_tmp[(3 * x_pos) + 1] + dev_C2_tmp[(3 * x_pos) + 2]) * (r / 3); } else if (org_wd == 4 * dst_wd) { B00[x_pos] = (dev_C0_tmp[4 * x_pos] + dev_C0_tmp[(4 * x_pos) + 1] + dev_C0_tmp[(4 * x_pos) + 2] + dev_C0_tmp[(4 * x_pos) + 3]) * (r / 4); 
B10[x_pos] = (dev_C1_tmp[4 * x_pos] + dev_C1_tmp[(4 * x_pos) + 1] + dev_C1_tmp[(4 * x_pos) + 2] + dev_C1_tmp[(4 * x_pos) + 3]) * (r / 4); B20[x_pos] = (dev_C2_tmp[4 * x_pos] + dev_C2_tmp[(4 * x_pos) + 1] + dev_C2_tmp[(4 * x_pos) + 2] + dev_C2_tmp[(4 * x_pos) + 3]) * (r / 4); } __syncthreads(); } } __global__ void int2lin_resmpl_messy_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int hn, int wn, int xbd0, int xbd1, int ybd0, int ybd1, int *xas_const, int *xbs_const, float *xwts_const, int *yas_const, int *ybs_const, float *ywts_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int xa, ya, yb; float wt, wt1; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + 0; float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + 1; float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + 2; float *B2 = dev_out_img + (2 * dst_ht * dst_wd); int y1 = 0; if (org_ht > dst_ht) { int m = 1; for (int iter = 0; iter < y_pos; iter++) { while (y1 + m < hn && yb == ybs_const[y1 + m]) m++; y1 += m; } wt = ywts_const[y1]; wt1 = 1 - wt; } else { y1 = y_pos; wt = ywts_const[y1]; wt1 = 1 - wt; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd * n_channels); A01 = A00 + (org_wd * n_channels); A02 = A01 + (org_wd * n_channels); A03 = A02 + (org_wd * n_channels); A10 = A1 + (ya * org_wd * n_channels); A11 = A00 + (org_wd * n_channels); A12 = A01 + (org_wd * n_channels); A13 = A02 + (org_wd * n_channels); A20 = A2 + (ya * org_wd * n_channels); A21 = A00 + (org_wd * n_channels); A22 = A01 + (org_wd * n_channels); A23 = A02 + (org_wd * n_channels); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); if (x_pos < org_wd) { // resample along y direction if (org_ht > dst_ht) { int m = 1; while ((y1 + m < hn) && (yb == ybs_const[y1 + m])) m++; if (m == 1) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] * ywts_const[y1]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] * ywts_const[y1]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] * ywts_const[y1]; } else if (m == 2) { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * ywts_const[y1 + 0]) + (A01[x_pos * n_channels] * ywts_const[y1 + 1]); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * ywts_const[y1 + 0]) + (A11[x_pos * n_channels] * ywts_const[y1 + 1]); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * ywts_const[y1 + 0]) + (A21[x_pos * n_channels] * ywts_const[y1 + 1]); } else if (m == 3) { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * ywts_const[y1 + 0]) + (A01[x_pos * n_channels] * ywts_const[y1 + 1]) + (A02[x_pos * n_channels] * ywts_const[y1 + 2]); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * ywts_const[y1 + 0]) + (A11[x_pos * n_channels] * ywts_const[y1 + 1]) + (A12[x_pos * n_channels] * ywts_const[y1 + 2]); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * ywts_const[y1 + 0]) + (A21[x_pos * n_channels] * ywts_const[y1 + 1]) + (A22[x_pos * n_channels] * ywts_const[y1 + 2]); } else if (m >= 4) { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * ywts_const[y1 + 0]) + (A01[x_pos * n_channels] * ywts_const[y1 + 1]) + (A02[x_pos * n_channels] * ywts_const[y1 + 2]) + (A03[x_pos * n_channels] * ywts_const[y1 + 3]); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * 
ywts_const[y1 + 0]) + (A11[x_pos * n_channels] * ywts_const[y1 + 1]) + (A12[x_pos * n_channels] * ywts_const[y1 + 2]) + (A13[x_pos * n_channels] * ywts_const[y1 + 3]); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * ywts_const[y1 + 0]) + (A21[x_pos * n_channels] * ywts_const[y1 + 1]) + (A22[x_pos * n_channels] * ywts_const[y1 + 2]) + (A23[x_pos * n_channels] * ywts_const[y1 + 3]); } for (int y0 = 4; y0 < m; y0++) { A01 = A00 + (y0 * org_wd * n_channels); A11 = A10 + (y0 * org_wd * n_channels); A11 = A10 + (y0 * org_wd * n_channels); wt1 = ywts_const[y1 + y0]; dev_C0_tmp[x_pos] = dev_C0_tmp[x_pos] + (A01[x_pos * n_channels] * wt1); dev_C1_tmp[x_pos] = dev_C1_tmp[x_pos] + (A11[x_pos * n_channels] * wt1); dev_C2_tmp[x_pos] = dev_C2_tmp[x_pos] + (A21[x_pos * n_channels] * wt1); } } else { bool yBd = y_pos < ybd0 || y_pos >= dst_ht - ybd1; if (yBd) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels]; } else { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * wt) + (A01[x_pos * n_channels] * wt1); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * wt) + (A11[x_pos * n_channels] * wt1); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * wt) + (A21[x_pos * n_channels] * wt1); } } } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); if (x_pos < dst_wd) { // resample along x direction (B -> C) if (org_wd > dst_wd) { if (xbd0 == 2) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); } else if (xbd0 == 3) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); } else if (xbd0 == 4) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C0_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C1_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C2_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); } else if (xbd0 > 4) { for(int x = 0; x < wn; x++) { B00[xbs_const[x]] += dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[xbs_const[x]] += dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[xbs_const[x]] += dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } else { int x = 0; for (x = 0; x < xbd0; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = 
dev_C2_tmp[xas_const[x]] * xwts_const[x]; } for (; x < dst_wd - xbd1; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x] + dev_C0_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x] + dev_C1_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x] + dev_C2_tmp[xas_const[x] + 1] * (r - xwts_const[x]); } for (; x < dst_wd; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } __syncthreads(); } } /***********************************************************************************/ /* GPU Functions to launch CUDA KERNELS */ /* These are basically ported versions of CPU functions as wrappers around kernels */ /***********************************************************************************/ void img_process::rgb2luv_gpu(cv::Mat& in_img, cv::Mat& out_img, float nrm, bool useRGB) { CV_Assert(in_img.type() == CV_32FC3); static int cnt; if (cnt == 0) { rgb2luv_setup_gpu(nrm); } cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; hipError_t cuda_ret; #if 0 unsigned char *dev_input_img; /* input image is of type 8UC3 (8 bit unsigned 3 channel) */ float *dev_output_luv_img; /* output image is of type 32FC3 (32 bit float 3 channel) */ #endif unsigned int in_img_size_total = in_img.step * in_img.rows; unsigned int out_img_size_total = res_img.step * res_img.rows; if (cnt == 0) { /* Allocate required memory on GPU device for both input and output images */ cuda_ret = hipMalloc((void **)&dev_input_img, in_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void **)&dev_output_luv_img, out_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cnt++; } /* Copy data from OpenCV input image to device memory */ cuda_ret = hipMemcpy(dev_input_img, in_img.ptr<unsigned char>(0), in_img_size_total, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy to device memory"); /* Specify a reasonable block size */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); /* Calculate grid size to cover the whole image */ const dim3 dim_grid(ceil(in_img.cols / BLOCK_SIZE), ceil(in_img.rows / BLOCK_SIZE)); hipLaunchKernelGGL(( convert_to_luv_gpu_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, dev_input_img, dev_output_luv_img, in_img.cols, in_img.rows, useRGB); /* Synchronize to check for any kernel launch errors */ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); /* Copy back data from device memory to OpenCV output image */ cuda_ret = hipMemcpy(res_img.ptr<float>(0), dev_output_luv_img, out_img_size_total, hipMemcpyDeviceToHost); if (cuda_ret != hipSuccess) FATAL("Unable to copy from device memory"); #if 0 /* Free the device memory */ cuda_ret = hipFree(dev_input_img); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_output_luv_img); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); #endif return; } void img_process::rgb2luv_gpu(cv::Mat& in_img, cv::Mat& out_img) { CV_Assert(in_img.type() == CV_8UC3); float nrm = 1.0f/255; static int cnt; if (cnt == 0) { rgb2luv_setup_gpu(nrm); } cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; hipError_t cuda_ret; unsigned int in_img_size_total = in_img.step * in_img.rows; unsigned int out_img_size_total = res_img.step * res_img.rows; if (cnt 
== 0) { /* Allocate required memory on GPU device for both input and output images */ cuda_ret = hipMalloc((void **)&dev_input_img, in_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cuda_ret = hipMalloc((void **)&dev_output_luv_img, out_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); cnt++; } /* Copy data from OpenCV input image to device memory */ cuda_ret = hipMemcpy(dev_input_img, in_img.ptr<unsigned char>(0), in_img_size_total, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy to device memory"); /* Specify a reasonable block size */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); /* Calculate grid size to cover the whole image */ const dim3 dim_grid(ceil(in_img.cols / BLOCK_SIZE), ceil(in_img.rows / BLOCK_SIZE)); hipLaunchKernelGGL(( convert_to_luv_gpu_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, dev_input_img, dev_output_luv_img, in_img.cols, in_img.rows, false); /* Synchronize to check for any kernel launch errors */ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); /* Copy back data from device memory to OpenCV output image */ cuda_ret = hipMemcpy(res_img.ptr<float>(0), dev_output_luv_img, out_img_size_total, hipMemcpyDeviceToHost); if (cuda_ret != hipSuccess) FATAL("Unable to copy from device memory"); return; } void img_process::free_gpu(void) { hipError_t cuda_ret; /* Free the device memory */ if (dev_input_img) { cuda_ret = hipFree(dev_input_img); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); dev_input_img = NULL; } if (dev_output_luv_img) { cuda_ret = hipFree(dev_output_luv_img); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); dev_output_luv_img = NULL; } } void img_process::rgb2luv_setup_gpu(float nrm) { /* set constants for conversion */ const float y0 = ((6.0f / 29) * (6.0f / 29) * (6.0f / 29)); const float a = ((29.0f / 3) * (29.0f / 3) * (29.0f / 3)); mr[0] = 0.430574f * nrm; mr[1] = 0.222015f * nrm; mr[2] = 0.020183f * nrm; mg[0] = 0.341550f * nrm; mg[1] = 0.706655f * nrm; mg[2] = 0.129553f * nrm; mb[0] = 0.178325f * nrm; mb[1] = 0.071330f * nrm; mb[2] = 0.939180f * nrm; hipError_t cuda_ret; cuda_ret = hipMemcpyToSymbol(mr_const, mr, sizeof(float) * 3, 0); if (cuda_ret != hipSuccess) FATAL("Unable to copy to constant memory"); cuda_ret = hipMemcpyToSymbol(mg_const, mg, sizeof(float) * 3, 0); if (cuda_ret != hipSuccess) FATAL("Unable to copy to constant memory"); cuda_ret = hipMemcpyToSymbol(mb_const, mb, sizeof(float) * 3, 0); if (cuda_ret != hipSuccess) FATAL("Unable to copy to constant memory"); /* build (padded) lookup table for y->l conversion assuming y in [0,1] */ float maxi = 1.0f / 270; float y, l; for (int i = 0; i < 1025; i++) { y = (i / 1024.0); l = y > y0 ? 
116 * pow((double)y, 1.0 / 3.0) - 16 : y * a; lTable[i] = l * maxi; } for(int i = 1025; i < 1064; i++) lTable[i] = lTable[i - 1]; cuda_ret = hipMemcpyToSymbol(lTable_const, lTable, sizeof(float) * 1064, 0); if (cuda_ret != hipSuccess) FATAL("Unable to copy to constant memory"); return; } void img_process::imResample_array_int2lin_gpu(float* in_img_gpu, float* out_img, int n_channels, int org_ht, int org_wd, int dst_ht, int dst_wd, float r) { int hn, wn; // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, dst_wd, output --> wn, xas, xbs, xwts, xbd /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if (org_ht == 2 * dst_ht) r /= 2; if (org_ht == 3 * dst_ht) r /= 3; if (org_ht == 4 * dst_ht) r /= 4; r /= float(1 + 1e-6); for (int x = 0; x < wn; x++) xwts[x] *= r; memset(out_img, 0, sizeof(float) * dst_ht * dst_wd * n_channels); hipError_t cuda_ret; float *dev_out_rsmpl_img, *dev_C_temp0, *dev_C_temp1, *dev_C_temp2; int *xas_const = NULL, *xbs_const = NULL, *yas_const = NULL, *ybs_const = NULL; float *xwts_const = NULL, *ywts_const = NULL; int out_img_size_total = sizeof(float) * dst_ht * dst_wd * n_channels; cuda_ret = hipMalloc((void **)&dev_C_temp0, sizeof(float) * (org_wd + 4)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_C_temp1, sizeof(float) * (org_wd + 4)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_C_temp2, sizeof(float) * (org_wd + 4)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); /* output image size changes frequently have to allocate each time */ cuda_ret = hipMalloc((void **)&dev_out_rsmpl_img, out_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&yas_const, sizeof(int) * hn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&ybs_const, sizeof(int) * hn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&ywts_const, sizeof(float) * hn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); /* wait all till malloc finishes */ cuda_ret = hipDeviceSynchronize(); cuda_ret = hipMemcpy(yas_const, yas, sizeof(int) * hn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(ybs_const, ybs, sizeof(int) * hn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(ywts_const, ywts, sizeof(float) * hn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); /* choose grid to cover entire output image */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); const dim3 dim_grid(ceil(dst_wd / BLOCK_SIZE), ceil(dst_ht / BLOCK_SIZE)); //hipStream_t stream[n_channels]; /* Create CUDA streams so that each channel operations can be done simultaneously */ /*for (int iter = 0; iter < n_channels; iter++) { cuda_ret = hipStreamCreate(&stream[iter]); if(cuda_ret != hipSuccess) FATAL("Unable to create CUDA streams"); }*/ cuda_ret = hipDeviceSynchronize(); if ((org_ht == dst_ht) || (org_ht == 2 * dst_ht) || (org_ht == 3 * dst_ht) || (org_ht == 4 * dst_ht)) { //for (int n = 0; n < n_channels; n++) { hipLaunchKernelGGL(( int2lin_resmpl_good_gpu_kernel), 
dim3(dim_grid), dim3(dim_block), 0, 0, in_img_gpu, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, yas_const, ybs_const); } /* wait for all streams to finish computing */ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel2"); } else { cuda_ret = hipMalloc((void **)&xas_const, sizeof(int) * wn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&xbs_const, sizeof(int) * wn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&xwts_const, sizeof(float) * wn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipDeviceSynchronize(); cuda_ret = hipMemcpy(xas_const, xas, sizeof(int) * wn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(xbs_const, xbs, sizeof(int) * wn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(xwts_const, xwts, sizeof(float) * wn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipDeviceSynchronize(); //for (int n = 0; n < n_channels; n++) { hipLaunchKernelGGL(( int2lin_resmpl_messy_gpu_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, in_img_gpu, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], xas_const, xbs_const, xwts_const, yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); } /* wait for all streams to finish computing */ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel2"); cuda_ret = hipFree(xas_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(xbs_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(xwts_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); } cuda_ret = hipMemcpy(out_img, dev_out_rsmpl_img, out_img_size_total, hipMemcpyDeviceToHost); if (cuda_ret != hipSuccess) FATAL("Unable to copy from device memory"); cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel2"); cuda_ret = hipFree(dev_out_rsmpl_img); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_C_temp0); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_C_temp1); if (cuda_ret != hipSuccess) FATAL("Unable to free device 
memory"); cuda_ret = hipFree(dev_C_temp2); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(yas_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(ybs_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(ywts_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); /*for (int i = 0; i < n_channels; i++) { cuda_ret = hipStreamDestroy(stream[i]); if(cuda_ret != hipSuccess) FATAL("Unable to destroy CUDA streams"); }*/ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel2"); delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } void img_process::imResample_array_lin2lin_gpu(float* in_img, float* out_img, int n_channels, int org_ht, int org_wd, int dst_ht, int dst_wd, float r) { int hn, wn; // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, dst_wd, output --> wn, xas, xbs, xwts, xbd /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if (org_ht == 2 * dst_ht) r /= 2; if (org_ht == 3 * dst_ht) r /= 3; if (org_ht == 4 * dst_ht) r /= 4; r /= float(1 + 1e-6); for (int x = 0; x < wn; x++) xwts[x] *= r; memset(out_img, 0, sizeof(float) * dst_ht * dst_wd * n_channels); hipError_t cuda_ret; float *in_img_temp, *dev_out_rsmpl_img, *dev_C_temp0, *dev_C_temp1, *dev_C_temp2; int *xas_const = NULL, *xbs_const = NULL, *yas_const = NULL, *ybs_const = NULL; float *xwts_const = NULL, *ywts_const = NULL; int in_img_size_total = sizeof(float) * org_ht * org_wd * n_channels; int out_img_size_total = sizeof(float) * dst_ht * dst_wd * n_channels; cuda_ret = hipMalloc((void **)&dev_C_temp0, sizeof(float) * (org_wd + 4)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_C_temp1, sizeof(float) * (org_wd + 4)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_C_temp2, sizeof(float) * (org_wd + 4)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); /* output image size changes frequently have to allocate each time */ cuda_ret = hipMalloc((void **)&dev_out_rsmpl_img, out_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); /* input image size changes frequently have to allocate each time */ cuda_ret = hipMalloc((void **)&in_img_temp, in_img_size_total); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&yas_const, sizeof(int) * hn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&ybs_const, sizeof(int) * hn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&ywts_const, sizeof(float) * hn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); /* wait all till malloc finishes */ cuda_ret = hipDeviceSynchronize(); //cout << "here2\n"; cuda_ret = hipMemcpy(in_img_temp, in_img, in_img_size_total, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(yas_const, yas, sizeof(int) * hn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(ybs_const, ybs, sizeof(int) * hn, hipMemcpyHostToDevice); 
if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(ywts_const, ywts, sizeof(float) * hn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); /* choose grid to cover entire output image */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); const dim3 dim_grid(ceil(dst_wd / BLOCK_SIZE), ceil(dst_ht / BLOCK_SIZE)); ; /*hipStream_t stream[n_channels]; /* Create CUDA streams so that each channel operations can be done simultaneously */ /*for (int iter = 0; iter < n_channels; iter++) { cuda_ret = hipStreamCreate(&stream[iter]); if(cuda_ret != hipSuccess) FATAL("Unable to create CUDA streams"); }*/ hipDeviceSynchronize(); if ((org_ht == dst_ht) || (org_ht == 2 * dst_ht) || (org_ht == 3 * dst_ht) || (org_ht == 4 * dst_ht)) { //for (int n = 0; n < n_channels; n++) { hipLaunchKernelGGL(( lin2lin_resmpl_good_gpu_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, in_img_temp, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, yas_const, ybs_const); } /* wait for all streams to finish computing */ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel3"); } else { cuda_ret = hipMalloc((void **)&xas_const, sizeof(int) * wn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&xbs_const, sizeof(int) * wn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&xwts_const, sizeof(float) * wn); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); hipDeviceSynchronize(); cuda_ret = hipMemcpy(xas_const, xas, sizeof(int) * wn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(xbs_const, xbs, sizeof(int) * wn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); cuda_ret = hipMemcpy(xwts_const, xwts, sizeof(float) * wn, hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); hipDeviceSynchronize(); //for (int n = 0; n < n_channels; n++) { hipLaunchKernelGGL(( lin2lin_resmpl_messy_gpu_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, in_img_temp, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], xas_const, xbs_const, xwts_const, yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); } /* wait for all streams to finish computing */ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to 
launch kernel4"); cuda_ret = hipFree(xas_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(xbs_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(xwts_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); } cuda_ret = hipMemcpy(out_img, dev_out_rsmpl_img, out_img_size_total, hipMemcpyDeviceToHost); if (cuda_ret != hipSuccess) FATAL("Unable to copy from device memory"); cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel2"); cuda_ret = hipFree(dev_out_rsmpl_img); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(in_img_temp); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_C_temp0); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_C_temp1); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_C_temp2); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(yas_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(ybs_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(ywts_const); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); /*for (int i = 0; i < n_channels; i++) { cuda_ret = hipStreamDestroy(stream[i]); if(cuda_ret != hipSuccess) FATAL("Unable to destroy CUDA streams"); }*/ cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel2"); delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } void img_process::ConvTri1_gpu(float* I, float* O, int ht, int wd, int dim, float p, int s) { const float nrm = 1.0f / ((p + 2) * (p + 2)); hipError_t cuda_ret; float *dev_I, *dev_O, *dev_T0, *dev_T1, *dev_T2; cuda_ret = hipMalloc((void **)&dev_I, sizeof(float) * (ht * wd * dim)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_O, sizeof(float) * (ht * wd * dim)); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_T0, sizeof(float) * ht * wd); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_T1, sizeof(float) * ht * wd); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); cuda_ret = hipMalloc((void **)&dev_T2, sizeof(float) * ht * wd); if (cuda_ret != hipSuccess) FATAL("Unable to allocate memory"); hipDeviceSynchronize(); cuda_ret = hipMemcpy(dev_I, I, sizeof(float) * (ht * wd * dim), hipMemcpyHostToDevice); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); hipDeviceSynchronize(); /* choose grid to cover entire output image */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); const dim3 dim_grid(ceil(wd / BLOCK_SIZE), ceil(ht / BLOCK_SIZE)); hipLaunchKernelGGL(( trianguler_convolution_gpu_kernel), dim3(dim_grid), dim3(dim_block), 0, 0, dev_I, dev_O, dev_T0, dev_T1, dev_T2, wd, ht, nrm, p); cuda_ret = hipDeviceSynchronize(); if (cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); cuda_ret = hipMemcpy(O, dev_O, sizeof(float) * (ht * wd * dim), hipMemcpyDeviceToHost); if (cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); hipDeviceSynchronize(); cuda_ret = hipFree(dev_I); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_O); if (cuda_ret != hipSuccess) FATAL("Unable 
to free device memory"); cuda_ret = hipFree(dev_T0); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_T1); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); cuda_ret = hipFree(dev_T2); if (cuda_ret != hipSuccess) FATAL("Unable to free device memory"); hipDeviceSynchronize(); } /******************************************************************************/ /* CPU Functions */ /******************************************************************************/ void img_process::rgb2luv(cv::Mat& in_img, cv::Mat& out_img, float nrm, bool useRGB) { CV_Assert( in_img.type() == CV_32FC3); rgb2luv_setup(nrm); float *R, *G, *B; if(!useRGB) /// default RGB order R = in_img.ptr<float>(0), G = in_img.ptr<float>(0) + 1, B = in_img.ptr<float>(0) + 2; else /// use opencv's built in RGB order: B = in_img.ptr<float>(0), G = in_img.ptr<float>(0) + 1, R = in_img.ptr<float>(0) + 2; cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; int n = in_img.rows * in_img.cols; /// xma opencv order of each channel: /// get l,u,v pointer and r g b pointer float *L=out_img.ptr<float>(0), *U=out_img.ptr<float>(0) + 1, *V=out_img.ptr<float>(0) + 2; for( int i=0; i<n; i++ ) { float r, g, b, x, y, z, l; r=*R; g=*G; b=*B; R += 3; G += 3; B += 3; x = mr[0]*r + mg[0]*g + mb[0]*b; y = mr[1]*r + mg[1]*g + mb[1]*b; z = mr[2]*r + mg[2]*g + mb[2]*b; l = lTable[static_cast<int>((y*1024))]; *(L) = l; z = 1/(x + 15*y + 3*z + (float)1e-35); *(U) = l * (13*4*x*z - 13*un) - minu; *(V) = l * (13*9*y*z - 13*vn) - minv; L += 3; U += 3; V += 3; } return; } void img_process::rgb2luv(cv::Mat& in_img, cv::Mat& out_img) { CV_Assert( in_img.type() == CV_8UC3); float nrm = 1.0f/255; rgb2luv_setup(nrm); unsigned char *B = in_img.ptr<unsigned char>(0), *G = in_img.ptr<unsigned char>(0) + 1, *R = in_img.ptr<unsigned char>(0) + 2; cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; int n = in_img.rows * in_img.cols; /// xma opencv order of each channel: /// get l,u,v pointer and r g b pointer float *L=out_img.ptr<float>(0), *U=out_img.ptr<float>(0) + 1, *V=out_img.ptr<float>(0) + 2; for( int i=0; i<n; i++ ) { float r, g, b, x, y, z, l; r=static_cast<float>(*R); g=static_cast<float>(*G); b=static_cast<float>(*B); R += 3; G += 3; B += 3; x = mr[0]*r + mg[0]*g + mb[0]*b; y = mr[1]*r + mg[1]*g + mb[1]*b; z = mr[2]*r + mg[2]*g + mb[2]*b; l = lTable[static_cast<int>((y*1024))]; *(L) = l; z = 1/(x + 15*y + 3*z + (float)1e-35); *(U) = l * (13*4*x*z - 13*un) - minu; *(V) = l * (13*9*y*z - 13*vn) - minv; L += 3; U += 3; V += 3; } return; } void img_process::rgb2luv_setup(float nrm) { // set constants for conversion const float y0 = ((6.0f/29)*(6.0f/29)*(6.0f/29)); const float a = ((29.0f/3)*(29.0f/3)*(29.0f/3)); un = 0.197833f; vn = 0.468331f; mr[0]= 0.430574f*nrm; mr[1]= 0.222015f*nrm; mr[2]= 0.020183f*nrm; mg[0]= 0.341550f*nrm; mg[1]= 0.706655f*nrm; mg[2]= 0.129553f*nrm; mb[0]= 0.178325f*nrm; mb[1]= 0.071330f*nrm; mb[2]= 0.939180f*nrm; float maxi= 1.0f/270; minu=-88.0f*maxi; minv=-134.0f*maxi; // build (padded) lookup table for y->l conversion assuming y in [0,1] float y, l; for(int i=0; i<1025; i++) { y = (i/1024.0); l = y>y0 ? 
116*pow((double)y,1.0/3.0)-16 : y*a; lTable[i] = l*maxi; } for(int i=1025; i<1064; i++) lTable[i]=lTable[i-1]; return; } void img_process::resampleCoef( int ha, int hb, int &n, int *&yas, int *&ybs, float *&wts, int bd[2], int pad) { /// xma input: ha, hb, /// xma output: n, yas, ybs, wts, bd,0 /// xma s is the scale factor const float s = static_cast<float>(hb)/static_cast<float>(ha), sInv = 1/s; float wt, wt0=static_cast<float>(1e-3)*s; //cout << "s = " << s << " sInv = " << sInv << " wt0 = " << wt0 << " pad = " << pad << endl; /// determine either downsample or upsample for resampling bool ds=ha>hb; int nMax; bd[0]=bd[1]=0; if(ds) { n=0; nMax=ha+(pad>2 ? pad : 2)*hb; } else { n=nMax=hb; } //cout << "nMax = " << nMax << endl; // initialize memory wts = new float[nMax]; yas = new int[nMax]; ybs = new int[nMax]; if( ds ) { for( int yb=0; yb<hb; yb++ ) { // create coefficients for downsampling float ya0f=yb*sInv, ya1f=ya0f+sInv, W=0; int ya0=int(ceil(ya0f)), ya1=int(ya1f), n1=0; //cout << "ya0f = " << ya0f << ", ya1f = " << ya1f << ", ya0 = << " << ya0 << ", ya1 = " << ya1 << endl; for( int ya=ya0-1; ya<ya1+1; ya++ ) { wt=s; if(ya==ya0-1) wt=(ya0-ya0f)*s; else if(ya==ya1) wt=(ya1f-ya1)*s; /// only when the weight is larger than 10-3, consider it as a valid weight (at the edge). if(wt>wt0 && ya>=0) { ybs[n]=yb; yas[n]=ya; wts[n]=wt; n++; n1++; W+=wt; } } if(W>1) for( int i=0; i<n1; i++ ) wts[n-n1+i]/=W; if(n1>bd[0]) bd[0]=n1; while( n1<pad ) { ybs[n]=yb; yas[n]=yas[n-1]; wts[n]=0; n++; n1++; } } } else { for( int yb=0; yb<hb; yb++ ) { // create coefficients for upsampling float yaf = (float(.5)+yb)*sInv-float(.5); int ya=(int) floor(yaf); wt=1; if(ya>=0 && ya<ha-1) wt=1-(yaf-ya); if(ya<0) { ya=0; bd[0]++; } if(ya>=ha-1) { ya=ha-1; bd[1]++; } ybs[yb]=yb; yas[yb]=ya; wts[yb]=wt; } } /* //cout << left << setw(15) << "wts " << left << setw(15) << "yas " << left << setw(15) << "ybs" << endl; for(int idx = 0; idx < nMax; ++idx) //cout << left << setw(15) << wts[idx] << left << setw(15) << yas[idx] << left << setw(15) << ybs[idx] << endl; //cout << "n = " << n << " bd[0] = " << bd[0] << " bd[1] = " << bd[1] << endl << endl << endl << endl; */ } /// bilinear interpolation methods to resize image (opencv mat version, no SSE, interleaved to interleaved memory) void img_process::imResample(cv::Mat& in_img, cv::Mat& out_img, int dheight, int dwidth, float r ) { cv::Mat img_resample = cv::Mat::zeros(dheight, dwidth, in_img.type()); int d = 1; if(in_img.type() == CV_32FC1) d = 1; else if(in_img.type() == CV_32FC2) d = 2; else if(in_img.type() == CV_32FC3) d = 3; else CV_Assert(0); int org_ht = in_img.rows, org_wd = in_img.cols, dst_ht = dheight, dst_wd = dwidth; out_img = img_resample; int hn, wn, x, /*x1,*/ y, z, xa, /*xb,*/ ya, yb, y1 /* xma added to convert from col major to row major*/; float *A0, *A1, *A2, *A3, *B0, wt, wt1; /// xma prepare 128-bit aligned array C of org height+4 and set boundary values to 0 float *C = new float[org_wd+4]; for(x=org_wd; x<org_wd+4; x++) C[x]=0; //bool sse = (typeid(T)==typeid(float)) && !(size_t(A)&15) && !(size_t(B)&15); // sse = false // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, org_wd, output wn, xas, xbs, xwts, xbd,0 /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if( org_ht==2*dst_ht ) r/=2; if( org_ht==3*dst_ht ) r/=3; if( org_ht==4*dst_ht 
) r/=4; r/=float(1+1e-6); for( x=0; x<wn; x++ ) { xwts[x] *= r; //cout << "xwts[" << x << "] = " << xwts[x] << endl; } // resample each color channel separately) for( z=0; z<d; z++ ) { float *A = in_img.ptr<float>(0) + z; float *B = img_resample.ptr<float>(0) + z; for( y=0; y<dst_ht; y++) { if(y==0) y1=0; ya=yas[y1]; yb=ybs[y1]; wt=ywts[y1]; wt1=1-wt; x=0; /// xma four points in org img for bilinear interpolation /// xma z*org_ht*org_wd is color channel offset, A0=A+ya*org_wd*d; // point to current row based on ya, (memory channel is interleaved, so each row takes org_wd*d spaces) /// bilinear interpolation, each direction, need to use 4 points to estimate the final value A1=A0+org_wd*d ; A2=A1+org_wd*d ; A3=A2+org_wd*d ; /// compute the pointer to the resampled image, current scale(for interleaved color channel) B0=B+yb*dst_wd*d; //cout << "ya = " << ya << " yb = " << yb << " wt = " << wt << " wt1 = " << wt1 << endl; //cout << "A0 = " << *A0 << " A1 = " << *A1 << " A2 = " << *A2 << " A3 = " << *A3 << endl; // resample along y direction if( org_ht==2*dst_ht ) { //cout << "testing scale height by 1/2." << endl; for(; x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d]; } y1 += 2; } else if( org_ht==3*dst_ht ) { //cout << "testing scale height by 1/3." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d]; } y1+=3; } else if( org_ht==4*dst_ht ) { //cout << "testing scale height by 1/4." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d] + A3[x*d]; } y1+=4; } else if( org_ht > dst_ht ) { //cout << "testing scale height by any other number." << endl; int m=1; while( y1+m<hn && yb==ybs[y1+m] ) m++; //cout << "hn = " << hn << " y1 = " << y1 << " m = " << m << endl; if(m==1) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1]; } } if(m==2) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1]; } } if(m==3) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2]; } } if(m>=4) { for(; x < org_wd;++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2] + A3[x*d] * ywts[y1+3]; } } for( int y0=4; y0<m; y0++ ) { A1=A0+y0*org_wd*d; wt1=ywts[y1+y0]; x=0; for(; x < org_wd; ++x) { C[x] = C[x] + A1[x*d]*wt1; } } y1+=m; } else { //cout << "testing scale height up " << endl; bool yBd = y < ybd[0] || y>=dst_ht-ybd[1]; y1++; //cout << "yBd = " << yBd << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << " y1 = " << y1 << endl; if(yBd) for(int tempx = 0; tempx < org_wd; ++tempx) C[tempx] = A0[tempx*d]; else { for(int tempx = 0; tempx < org_wd; ++tempx) { C[tempx] = A0[tempx*d]*wt + A1[tempx*d]*wt1; } } } // resample along x direction (B -> C) if( org_wd==dst_wd*2 ) { //cout << "testing scale width by 1/2." << endl; float r2 = r/2; for(x=0 ; x < dst_wd; x++ ) B0[x*d]=(C[2*x]+C[2*x+1])*r2; } else if( org_wd==dst_wd*3 ) { //cout << "testing scale width by 1/3." << endl; for(x=0; x<dst_wd; x++) B0[x*d]=(C[3*x]+C[3*x+1]+C[3*x+2])*(r/3); } else if( org_wd==dst_wd*4 ) { //cout << "testing scale width by 1/4." << endl; for(x=0; x<dst_wd; x++) B0[x*d]=(C[4*x]+C[4*x+1]+C[4*x+2]+C[4*x+3])*(r/4); } else if( org_wd>dst_wd ) { //cout << "testing scale width by any number." 
<< endl; //cout << "xbd[0] = " << xbd[0] << endl; x=0; //#define U(o) C[xa+o]*xwts[x*4+o] if(xbd[0]==2) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x*d] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1];// U(0)+U(1); } if(xbd[0]==3) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x*d] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2];//U(0)+U(1)+U(2); } if(xbd[0]==4) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x*d] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2] + C[xa+3]*xwts[x*4+3];//U(0)+U(1)+U(2)+U(3); } if(xbd[0]>4) for(; x<wn; x++) { B0[xbs[x]*d] += C[xas[x]] * xwts[x]; } } else { //cout << "testing scale width up!" << endl; for(x=0; x<xbd[0]; x++) B0[x*d] = C[xas[x]]*xwts[x]; for(; x<dst_wd-xbd[1]; x++) B0[x*d] = C[xas[x]]*xwts[x]+C[xas[x]+1]*(r-xwts[x]); for(; x<dst_wd; x++) B0[x*d] = C[xas[x]]*xwts[x]; } } } delete[] C; delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } /// bilinear interpolation methods to resize image (array version, no SSE) /// note that for the input array, the different color channels are interleaved, but for the output array, the memory channels are separated void img_process::imResample_array_int2lin(float* in_img, float* out_img, int d, int org_ht, int org_wd, int dst_ht, int dst_wd, float r ) { int hn, wn, x, /*x1,*/ y, z, xa, /*xb,*/ ya, yb, y1 /* xma added to convert from col major to row major*/; float *A0, *A1, *A2, *A3, *B0, wt, wt1; /// xma prepare 128-bit aligned array C of org height+4 and set boundary values to 0 float *C = new float[org_wd+4]; for(x=org_wd; x<org_wd+4; x++) C[x]=0; //bool sse = (typeid(T)==typeid(float)) && !(size_t(A)&15) && !(size_t(B)&15); // sse = false // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, org_wd, output wn, xas, xbs, xwts, xbd,0 /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if( org_ht==2*dst_ht ) r/=2; if( org_ht==3*dst_ht ) r/=3; if( org_ht==4*dst_ht ) r/=4; r/=float(1+1e-6); for( x=0; x<wn; x++ ) { xwts[x] *= r; //cout << "xwts[" << x << "] = " << xwts[x] << endl; } /// check if only re-assemble the pixel values: if(org_ht == dst_ht && org_wd == dst_wd) { for(int chn = 0; chn < d; ++chn) for(int idx = chn; idx < org_ht*org_wd*d; idx += d) { out_img[0] = in_img[idx]; out_img++; } return; } memset(out_img, 0, sizeof(float)*dst_ht*dst_wd*d); // resample each color channel separately) for( z=0; z<d; z++ ) { float *A = in_img + z; float *B = out_img + z * dst_ht * dst_wd; //cout << "z = " << z << endl; for( y=0; y<dst_ht; y++) { if(y==0) y1=0; ya=yas[y1]; yb=ybs[y1]; wt=ywts[y1]; wt1=1-wt; x=0; /// xma four points in org img for bilinear interpolation /// xma z*org_ht*org_wd is color channel offset, A0=A+ya*org_wd*d; // point to current row based on ya, (memory channel is interleaved, so each row takes org_wd*d spaces) /// bilinear interpolation, each direction, need to use 4 points to estimate the final value A1=A0+org_wd*d ; A2=A1+org_wd*d ; A3=A2+org_wd*d ; /// compute the pointer to the resampled image, current scale(for interleaved color channel) B0=B+yb*dst_wd; //cout << "ya = " << ya << " yb = " << yb << " wt = " << wt << " wt1 = " << wt1 << endl; //cout << "A0 = " << *A0 << " A1 = " << *A1 << " A2 = " << *A2 << " A3 = " << *A3 << endl; // resample along y direction if( org_ht==2*dst_ht ) { //cout << "testing scale height by 1/2." 
<< endl; for(; x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d]; } y1 += 2; } else if( org_ht==3*dst_ht ) { //cout << "testing scale height by 1/3." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d]; } y1+=3; } else if( org_ht==4*dst_ht ) { //cout << "testing scale height by 1/4." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d] + A3[x*d]; } y1+=4; } else if( org_ht > dst_ht ) { //cout << "testing scale height by any other number." << endl; int m=1; while( y1+m<hn && yb==ybs[y1+m] ) m++; //cout << "hn = " << hn << " y1 = " << y1 << " m = " << m << endl; if(m==1) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1]; } } if(m==2) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1]; } } if(m==3) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2]; } } if(m>=4) { for(; x < org_wd;++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2] + A3[x*d] * ywts[y1+3]; } } for( int y0=4; y0<m; y0++ ) { A1=A0+y0*org_wd*d; wt1=ywts[y1+y0]; x=0; for(; x < org_wd; ++x) { C[x] = C[x] + A1[x*d]*wt1; } } y1+=m; } else { //cout << "testing scale height up " << endl; bool yBd = y < ybd[0] || y>=dst_ht-ybd[1]; y1++; //cout << "yBd = " << yBd << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << " y1 = " << y1 << endl; if(yBd) for(int tempx = 0; tempx < org_wd; ++tempx) C[tempx] = A0[tempx*d]; else { for(int tempx = 0; tempx < org_wd; ++tempx) { C[tempx] = A0[tempx*d]*wt + A1[tempx*d]*wt1; } } } // resample along x direction (B -> C) if( org_wd==dst_wd*2 ) { //cout << "testing scale width by 1/2." << endl; float r2 = r/2; for(x=0 ; x < dst_wd; x++ ) B0[x]=(C[2*x]+C[2*x+1])*r2; } else if( org_wd==dst_wd*3 ) { //cout << "testing scale width by 1/3." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[3*x]+C[3*x+1]+C[3*x+2])*(r/3); } else if( org_wd==dst_wd*4 ) { //cout << "testing scale width by 1/4." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[4*x]+C[4*x+1]+C[4*x+2]+C[4*x+3])*(r/4); } else if( org_wd>dst_wd ) { //cout << "testing scale width by any number." << endl; //cout << "xbd[0] = " << xbd[0] << endl; x=0; //#define U(o) C[xa+o]*xwts[x*4+o] if(xbd[0]==2) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1];// U(0)+U(1); } if(xbd[0]==3) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2];//U(0)+U(1)+U(2); } if(xbd[0]==4) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2] + C[xa+3]*xwts[x*4+3];//U(0)+U(1)+U(2)+U(3); } if(xbd[0]>4) for(; x<wn; x++) { B0[xbs[x]] += C[xas[x]] * xwts[x]; } } else { //cout << "testing scale width up!" 
<< endl; for(x=0; x<xbd[0]; x++) B0[x] = C[xas[x]]*xwts[x]; for(; x<dst_wd-xbd[1]; x++) B0[x] = C[xas[x]]*xwts[x]+C[xas[x]+1]*(r-xwts[x]); for(; x<dst_wd; x++) B0[x] = C[xas[x]]*xwts[x]; } } } delete[] C; delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } /// bilinear interpolation methods to resize image (array version, no SSE) /// note that for the input array, the different color channels are separated, linearly sotred in memory,same for the output array void img_process::imResample_array_lin2lin(float* in_img, float* out_img, int d, int org_ht, int org_wd, int dst_ht, int dst_wd, float r ) { int hn, wn, x, /*x1,*/ y, z, xa, /*xb,*/ ya, yb, y1 /* xma added to convert from col major to row major*/; float *A0, *A1, *A2, *A3, *B0, wt, wt1; float *C = new float[org_wd+4]; for(x=org_wd; x<org_wd+4; x++) C[x]=0; //bool sse = (typeid(T)==typeid(float)) && !(size_t(A)&15) && !(size_t(B)&15); // sse = false // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, org_wd, output wn, xas, xbs, xwts, xbd,0 /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); //cout << "org_wd = " << org_wd << " dst_wd = " << dst_wd << " wn = " << wn << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << endl; //cout << "org_ht = " << org_ht << " dst_ht = " << dst_ht << " hn = " << hn << " xbd[0] = " << xbd[0] << " xbd[1] = " << xbd[1] << endl; if( org_ht==2*dst_ht ) r/=2; if( org_ht==3*dst_ht ) r/=3; if( org_ht==4*dst_ht ) r/=4; r/=float(1+1e-6); for( x=0; x<wn; x++ ) { xwts[x] *= r; //cout << "xwts[" << x << "] = " << xwts[x] << endl; } //cout << "r = " << r << endl; memset(out_img, 0, sizeof(float)*dst_ht*dst_wd*d); // resample each color channel separately) for( z=0; z<d; z++ ) { float *A = in_img + z * org_ht * org_wd; float *B = out_img + z * dst_ht * dst_wd; //cout << "z = " << z << endl; for( y=0; y<dst_ht; y++) { if(y==0) y1=0; ya=yas[y1]; yb=ybs[y1]; wt=ywts[y1]; wt1=1-wt; x=0; /// xma four points in org img for bilinear interpolation /// xma z*org_ht*org_wd is color channel offset, A0=A+ya*org_wd; // point to current row based on ya, (memory channel is linear, so each row is org_wd ) /// bilinear interpolation, each direction, need to use 4 points to estimate the final value A1=A0+org_wd ; A2=A1+org_wd ; A3=A2+org_wd ; /// compute the pointer to the resampled image, current scale(for interleaved color channel) B0=B+yb*dst_wd; //cout << "ya = " << ya << " yb = " << yb << " wt = " << wt << " wt1 = " << wt1 << endl; //cout << "A0 = " << *A0 << " A1 = " << *A1 << " A2 = " << *A2 << " A3 = " << *A3 << endl; // resample along y direction if( org_ht==2*dst_ht ) { //cout << "testing scale height by 1/2." << endl; for(; x < org_wd; ++x) { C[x] = A0[x] + A1[x]; } y1 += 2; } else if( org_ht==3*dst_ht ) { //cout << "testing scale height by 1/3." << endl; for(;x < org_wd; ++x) { C[x] = A0[x] + A1[x] + A2[x]; } y1+=3; } else if( org_ht==4*dst_ht ) { //cout << "testing scale height by 1/4." << endl; for(;x < org_wd; ++x) { C[x] = A0[x] + A1[x] + A2[x] + A3[x]; } y1+=4; } else if( org_ht > dst_ht ) { //cout << "testing scale height by any other number." 
<< endl; int m=1; while( y1+m<hn && yb==ybs[y1+m] ) m++; //cout << "hn = " << hn << " y1 = " << y1 << " m = " << m << endl; if(m==1) { for(;x < org_wd; ++x) { C[x] = A0[x] * ywts[y1]; } } if(m==2) { for(;x < org_wd; ++x) { C[x] = A0[x] * ywts[y1] + A1[x] * ywts[y1+1]; } } if(m==3) { for(;x < org_wd; ++x) { C[x] = A0[x] * ywts[y1] + A1[x] * ywts[y1+1] + A2[x] * ywts[y1+2]; } } if(m>=4) { for(; x < org_wd;++x) { C[x] = A0[x] * ywts[y1] + A1[x] * ywts[y1+1] + A2[x] * ywts[y1+2] + A3[x] * ywts[y1+3]; } } for( int y0=4; y0<m; y0++ ) { A1=A0+y0*org_wd; wt1=ywts[y1+y0]; x=0; for(; x < org_wd; ++x) { C[x] = C[x] + A1[x]*wt1; } } y1+=m; } else { //cout << "testing scale height up " << endl; bool yBd = y < ybd[0] || y>=dst_ht-ybd[1]; y1++; //cout << "yBd = " << yBd << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << " y1 = " << y1 << endl; if(yBd) for(int tempx = 0; tempx < org_wd; ++tempx) C[tempx] = A0[tempx]; else { for(int tempx = 0; tempx < org_wd; ++tempx) { C[tempx] = A0[tempx]*wt + A1[tempx]*wt1; } } } // resample along x direction (B -> C) if( org_wd==dst_wd*2 ) { //cout << "testing scale width by 1/2." << endl; float r2 = r/2; for(x=0 ; x < dst_wd; x++ ) B0[x]=(C[2*x]+C[2*x+1])*r2; } else if( org_wd==dst_wd*3 ) { //cout << "testing scale width by 1/3." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[3*x]+C[3*x+1]+C[3*x+2])*(r/3); } else if( org_wd==dst_wd*4 ) { //cout << "testing scale width by 1/4." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[4*x]+C[4*x+1]+C[4*x+2]+C[4*x+3])*(r/4); } else if( org_wd>dst_wd ) { //cout << "testing scale width by any number." << endl; //cout << "xbd[0] = " << xbd[0] << endl; x=0; //#define U(o) C[xa+o]*xwts[x*4+o] if(xbd[0]==2) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1];// U(0)+U(1); } if(xbd[0]==3) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2];//U(0)+U(1)+U(2); } if(xbd[0]==4) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2] + C[xa+3]*xwts[x*4+3];//U(0)+U(1)+U(2)+U(3); } if(xbd[0]>4) for(; x<wn; x++) { B0[xbs[x]] += C[xas[x]] * xwts[x]; } } else { //cout << "testing scale width up!" 
<< endl; for(x=0; x<xbd[0]; x++) B0[x] = C[xas[x]]*xwts[x]; for(; x<dst_wd-xbd[1]; x++) B0[x] = C[xas[x]]*xwts[x]+C[xas[x]+1]*(r-xwts[x]); for(; x<dst_wd; x++) B0[x] = C[xas[x]]*xwts[x]; } } } delete[] C; delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } void img_process::ConvTri1(float* I, float* O, int ht, int wd, int dim, float p, int s) { const float nrm = 1.0f/((p+2)*(p+2)); float *It, *Im, *Ib, *T= new float[wd]; /// perform convTri dimension by dimension for( int d0=0; d0<dim; d0++ ) { for(int y=s/2; y<ht; y+= s ) /// this is equivalent to i = 0 to ht { /// point It to the current dim and row It= Im = Ib = I+ y*wd+d0*ht*wd; if(y>0) /// not the first row, let It point to previous row It-=wd; if(y < ht-1) /// not the last row, let Ib point to next row Ib+=wd; for(int x=0; x< wd; ++x ) T[x]=nrm*(It[x]+p*Im[x]+Ib[x]); ConvTri1X(T,O,wd,p,s); O += wd/s; /// point to next row } } } void img_process::ConvTri1X(float* I, float* O, int wd, float p, int s) { int j = 0; O[j]=(1+p)*I[j]+I[j+1]; ++j; for(; j < wd - 1; ++j ) O[j]=I[j-1]+p*I[j]+I[j+1]; O[j]=I[j-1]+(1+p)*I[j]; } /// copy the opencv mat files to array with interleaved color channels void img_process::get_pix_all_scales_int(cv::Mat& img, const vector<cv::Size>& scales, float* pix_array) { #ifdef __OUTPUT_PIX__ ofstream pix_out; pix_out.open("pix_out_int.txt",ios::out); #endif for(vector<cv::Size>::const_iterator ii = scales.begin(); ii != scales.end(); ++ii) { cv::Mat img_small; unsigned height = static_cast<unsigned>(ii->height); unsigned width = static_cast<unsigned>(ii->width); if(height == static_cast<unsigned>(img.rows) && width == static_cast<unsigned>(img.cols)) img_small = img; else imResample(img, img_small, height,width, 1.0f); //cout << "Currently at scale " << ii - scales.begin() << ", height = " << img_small.rows << " width = " << img_small.cols << ", number of channels = " << img_small.channels() << endl; float* mat_ptr = img_small.ptr<float>(0); unsigned array_sz = width*height*img_small.channels(); memcpy(pix_array, mat_ptr, array_sz*sizeof(float)); #ifdef __OUTPUT_PIX__ for(int i = 0; i < img_small.channels(); ++i) for(unsigned j = i; j < array_sz; j+= img_small.channels()) pix_out << pix_array[j] << " "; pix_out << endl << endl; #endif pix_array += array_sz; } #ifdef __OUTPUT_PIX__ pix_out.close(); #endif return; } /// copy opencv mat files to array with linear ordered color channels (each channel is stored separatly in memory) /// this process is slightly slower than the interleaved memroy access (not able to use memcpy) void img_process::get_pix_all_scales_lin(cv::Mat& img, const vector<cv::Size>& scales, float* pix_array) { #ifdef __OUTPUT_PIX__ ofstream pix_out; pix_out.open("pix_out_lin.txt",ios::out); #endif int arr_sz = static_cast<unsigned>(scales[0].height) * static_cast<unsigned>(scales[0].width) * img.channels(); float* img_small = new float[arr_sz]; float* mat_ptr = img.ptr<float>(0); for(vector<cv::Size>::const_iterator ii = scales.begin(); ii != scales.end(); ++ii) { //cout << "Currently at scale # " << ii-scales.begin() << endl; int height = static_cast<int>(ii->height); int width = static_cast<int>(ii->width); int array_sz = width*height*img.channels(); imResample_array_int2lin(mat_ptr, img_small, img.channels(), img.rows, img.cols, height, width, 1.0f); memcpy(pix_array, img_small, array_sz*sizeof(float)); #ifdef __OUTPUT_PIX__ for(int i = 0; i < array_sz; ++i) { pix_out << pix_array[i] << " "; } pix_out << endl << endl; #endif pix_array += array_sz; } 
#ifdef __OUTPUT_PIX__
    pix_out.close();
#endif
    delete[] img_small;
    return;
}
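// ---------------------------------------------------------------------------
// Hedged usage sketch (added by the editor, not part of the original file).
// Shows one way the linear-channel pyramid helper above could be driven.
// The scale list, buffer sizing and variable names are illustrative
// assumptions only; the input is expected to be CV_32FC3 because
// get_pix_all_scales_lin() reads it through ptr<float>().
// ---------------------------------------------------------------------------
#if 0
#include <vector>
#include <opencv2/opencv.hpp>

static void pyramid_usage_sketch(cv::Mat& img_luv /* CV_32FC3 */)
{
    img_process ip;
    std::vector<cv::Size> scales;
    scales.push_back(cv::Size(img_luv.cols,     img_luv.rows));      // full resolution first
    scales.push_back(cv::Size(img_luv.cols / 2, img_luv.rows / 2));  // one downscaled level

    // pix_array must hold width*height*channels floats for every scale,
    // because get_pix_all_scales_lin() advances its output pointer scale by scale.
    size_t total = 0;
    for (size_t i = 0; i < scales.size(); ++i)
        total += static_cast<size_t>(scales[i].width) * scales[i].height * img_luv.channels();

    float* pix_array = new float[total];
    ip.get_pix_all_scales_lin(img_luv, scales, pix_array);
    // ... consume the per-channel, per-scale pixel data ...
    delete[] pix_array;
}
#endif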
8510709a1bd849db5f0d3e86571c402991f86e27.cu
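// ---------------------------------------------------------------------------
// Editor's hedged note on the launch configurations in this file (not part of
// the original source). The kernels below are launched with
//     const dim3 dim_grid(ceil(in_img.cols / BLOCK_SIZE), ceil(in_img.rows / BLOCK_SIZE));
// Both operands are ints, so the division truncates before ceil() is applied;
// an image whose width or height is not a multiple of BLOCK_SIZE would leave
// the last partial block unlaunched. A conventional integer ceiling would be:
//
//     const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE);
//     const dim3 dim_grid((in_img.cols + BLOCK_SIZE - 1) / BLOCK_SIZE,
//                         (in_img.rows + BLOCK_SIZE - 1) / BLOCK_SIZE);
//
// Whether the original code relies on dimensions being multiples of BLOCK_SIZE
// is not stated, so this is flagged as an observation rather than applied as a
// fix. A similar observation holds for the A11/A12/A13 and A21/A22/A23 row
// pointers in the resampling kernels, which are derived from A00 rather than
// A10/A20 and look like copy-paste slips; they are left untouched here.
// ---------------------------------------------------------------------------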
#include "img_process.hpp" #include <fstream> #include <stdio.h> #include <opencv2/opencv.hpp> //#define __OUTPUT_PIX__ #define BLOCK_SIZE 32 __constant__ __device__ float lTable_const[1064]; __constant__ __device__ float mr_const[3]; __constant__ __device__ float mg_const[3]; __constant__ __device__ float mb_const[3]; #define FATAL(msg, ...) \ do {\ fprintf(stderr, "[%s:%d] "msg"\n", __FILE__, __LINE__, ##__VA_ARGS__);\ exit(-1);\ } while(0) img_process::img_process() { } img_process::~img_process() { } /*****************************************************************************/ /* CUDA KERNELS */ /*****************************************************************************/ __global__ void convert_to_luv_gpu_kernel(unsigned char *in_img, float *out_img, int cols, int rows, bool use_rgb) { float r, g, b, l, u, v, x, y, z, lt; unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < cols) && (y_pos < rows)) { unsigned int pos = (y_pos * cols) + x_pos; if (use_rgb) { r = (float)in_img[(3 * pos)]; g = (float)in_img[(3 * pos) + 1]; b = (float)in_img[(3 * pos) + 2]; } else { b = (float)in_img[(3 * pos)]; g = (float)in_img[(3 * pos) + 1]; r = (float)in_img[(3 * pos) + 2]; } x = (mr_const[0] * r) + (mg_const[0] * g) + (mb_const[0] * b); y = (mr_const[1] * r) + (mg_const[1] * g) + (mb_const[1] * b); z = (mr_const[2] * r) + (mg_const[2] * g) + (mb_const[2] * b); float maxi = 1.0f / 270; float minu = -88.0f * maxi; float minv = -134.0f * maxi; float un = 0.197833f; float vn = 0.468331f; lt = lTable_const[static_cast<int>((y*1024))]; l = lt; z = 1/(x + (15 * y) + (3 * z) + (float)1e-35); u = lt * (13 * 4 * x * z - 13 * un) - minu; v = lt * (13 * 9 * y * z - 13 * vn) - minv; out_img[(3 * pos)] = l; out_img[(3 * pos) + 1] = u; out_img[(3 * pos) + 2] = v; } } __global__ void trianguler_convolution_gpu_kernel(float *dev_I, float *dev_O, float *T0, float *T1, float *T2, int wd, int ht, float nrm, float p) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < wd) && (y_pos < ht)) { float *It0, *It1, *It2, *Im0, *Im1, *Im2, *Ib0, *Ib1, *Ib2; float *Ot0, *Ot1, *Ot2; float *T00, *T10, *T20; It0 = Im0 = Ib0 = dev_I + (y_pos * wd) + (0 * ht * wd); It1 = Im1 = Ib1 = dev_I + (y_pos * wd) + (1 * ht * wd); It2 = Im2 = Ib2 = dev_I + (y_pos * wd) + (2 * ht * wd); Ot0 = dev_O + (y_pos * wd) + (0 * ht * wd); Ot1 = dev_O + (y_pos * wd) + (1 * ht * wd); Ot2 = dev_O + (y_pos * wd) + (2 * ht * wd); T00 = T0 + (y_pos * wd); T10 = T1 + (y_pos * wd); T20 = T2 + (y_pos * wd); if(y_pos > 0) { /// not the first row, let It point to previous row It0 -= wd; It1 -= wd; It2 -= wd; } if(y_pos < ht - 1) { /// not the last row, let Ib point to next row Ib0 += wd; Ib1 += wd; Ib2 += wd; } T00[x_pos] = nrm * (It0[x_pos] + (p * Im0[x_pos]) + Ib0[x_pos]); T10[x_pos] = nrm * (It1[x_pos] + (p * Im1[x_pos]) + Ib1[x_pos]); T20[x_pos] = nrm * (It2[x_pos] + (p * Im2[x_pos]) + Ib2[x_pos]); __syncthreads(); if (x_pos == 0) { Ot0[x_pos] = ((1 + p) * T00[x_pos]) + T00[x_pos + 1]; Ot1[x_pos] = ((1 + p) * T10[x_pos]) + T10[x_pos + 1]; Ot2[x_pos] = ((1 + p) * T20[x_pos]) + T20[x_pos + 1]; } else if (x_pos == wd - 1) { Ot0[x_pos] = T00[x_pos - 1] + ((1 + p) * T00[x_pos]); Ot1[x_pos] = T10[x_pos - 1] + ((1 + p) * T10[x_pos]); Ot2[x_pos] = T20[x_pos - 1] + ((1 + p) * T20[x_pos]); } else { Ot0[x_pos] = T00[x_pos - 1] + (p * T00[x_pos]) + T00[x_pos + 1]; Ot1[x_pos] = T10[x_pos - 1] + (p * 
T10[x_pos]) + T10[x_pos + 1]; Ot2[x_pos] = T20[x_pos - 1] + (p * T20[x_pos]) + T20[x_pos + 1]; } __syncthreads(); } } __global__ void lin2lin_resmpl_good_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int *yas_const, int *ybs_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int ya, yb; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + (0 * org_ht * org_wd); float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + (1 * org_ht * org_wd); float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + (2 * org_ht * org_wd); float *B2 = dev_out_img + (2 * dst_ht * dst_wd); if (org_ht == dst_ht && org_wd == dst_wd) { int out_img_idx = y_pos + (dst_wd * x_pos); B0[out_img_idx] = A0[out_img_idx * n_channels]; B1[out_img_idx] = A1[out_img_idx * n_channels]; B2[out_img_idx] = A2[out_img_idx * n_channels]; return; } int y1 = 0; if (org_ht == 2 * dst_ht) { y1 += 2 * y_pos; } else if (org_ht == 3 * dst_ht) { y1 += 3 * y_pos; } else if (org_ht == 4 * dst_ht) { y1 += 4 * y_pos; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd); A01 = A00 + (org_wd); A02 = A01 + (org_wd); A03 = A02 + (org_wd); A10 = A1 + (ya * org_wd); A11 = A00 + (org_wd); A12 = A01 + (org_wd); A13 = A02 + (org_wd); A20 = A2 + (ya * org_wd); A21 = A00 + (org_wd); A22 = A01 + (org_wd); A23 = A02 + (org_wd); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); // resample along y direction if (org_ht == 2 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos]; } else if (org_ht == 3 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos] + A02[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos] + A12[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos] + A22[x_pos]; } else if (org_ht == 4 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos] + A01[x_pos] + A02[x_pos] + A03[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos] + A11[x_pos] + A12[x_pos] + A13[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos] + A21[x_pos] + A22[x_pos] + A23[x_pos]; } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); // resample along x direction (B -> C) if (org_wd == 2 * dst_wd) { B00[x_pos]= (dev_C0_tmp[2 * x_pos] + dev_C0_tmp[(2 * x_pos) + 1]) * (r / 2); B10[x_pos]= (dev_C1_tmp[2 * x_pos] + dev_C1_tmp[(2 * x_pos) + 1]) * (r / 2); B20[x_pos]= (dev_C2_tmp[2 * x_pos] + dev_C2_tmp[(2 * x_pos) + 1]) * (r / 2); } else if (org_wd == 3 * dst_wd) { B00[x_pos] = (dev_C0_tmp[3 * x_pos] + dev_C0_tmp[(3 * x_pos) + 1] + dev_C0_tmp[(3 * x_pos) + 2]) * (r / 3); B10[x_pos] = (dev_C1_tmp[3 * x_pos] + dev_C1_tmp[(3 * x_pos) + 1] + dev_C1_tmp[(3 * x_pos) + 2]) * (r / 3); B20[x_pos] = (dev_C2_tmp[3 * x_pos] + dev_C2_tmp[(3 * x_pos) + 1] + dev_C2_tmp[(3 * x_pos) + 2]) * (r / 3); } else if (org_wd == 4 * dst_wd) { B00[x_pos] = (dev_C0_tmp[4 * x_pos] + dev_C0_tmp[(4 * x_pos) + 1] + dev_C0_tmp[(4 * x_pos) + 2] + dev_C0_tmp[(4 * x_pos) + 3]) * (r / 4); B10[x_pos] = (dev_C1_tmp[4 * x_pos] + dev_C1_tmp[(4 * x_pos) + 1] + dev_C1_tmp[(4 * x_pos) + 2] + dev_C1_tmp[(4 * x_pos) + 3]) * (r / 4); B20[x_pos] = (dev_C2_tmp[4 * x_pos] + dev_C2_tmp[(4 * x_pos) + 1] + 
dev_C2_tmp[(4 * x_pos) + 2] + dev_C2_tmp[(4 * x_pos) + 3]) * (r / 4); } __syncthreads(); } } __global__ void lin2lin_resmpl_messy_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int hn, int wn, int xbd0, int xbd1, int ybd0, int ybd1, int *xas_const, int *xbs_const, float *xwts_const, int *yas_const, int *ybs_const, float *ywts_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int xa, ya, yb; float wt, wt1; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + 0; float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + 1; float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + 2; float *B2 = dev_out_img + (2 * dst_ht * dst_wd); int y1 = 0; if (org_ht > dst_ht) { int m = 1; for (int iter = 0; iter < y_pos; iter++) { while (y1 + m < hn && yb == ybs_const[y1 + m]) m++; y1 += m; } wt = ywts_const[y1]; wt1 = 1 - wt; } else { y1 = y_pos; wt = ywts_const[y1]; wt1 = 1 - wt; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd * n_channels); A01 = A00 + (org_wd * n_channels); A02 = A01 + (org_wd * n_channels); A03 = A02 + (org_wd * n_channels); A10 = A1 + (ya * org_wd * n_channels); A11 = A00 + (org_wd * n_channels); A12 = A01 + (org_wd * n_channels); A13 = A02 + (org_wd * n_channels); A20 = A2 + (ya * org_wd * n_channels); A21 = A00 + (org_wd * n_channels); A22 = A01 + (org_wd * n_channels); A23 = A02 + (org_wd * n_channels); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); int x = 0; if (org_wd < x_pos) { // resample along y direction if (org_ht > dst_ht) { int m = 1; while ((y1 + m < hn) && (yb == ybs_const[y1 + m])) m++; if (m == 1) { dev_C0_tmp[x_pos] = A00[x_pos] * ywts_const[y1]; dev_C1_tmp[x_pos] = A10[x_pos] * ywts_const[y1]; dev_C2_tmp[x_pos] = A20[x_pos] * ywts_const[y1]; } else if (m == 2) { dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) + (A01[x_pos] * ywts_const[y1 + 1]); dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) + (A11[x_pos] * ywts_const[y1 + 1]); dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) + (A21[x_pos] * ywts_const[y1 + 1]); } else if (m == 3) { dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) + (A01[x_pos] * ywts_const[y1 + 1]) + (A02[x_pos] * ywts_const[y1 + 2]); dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) + (A11[x_pos] * ywts_const[y1 + 1]) + (A12[x_pos] * ywts_const[y1 + 2]); dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) + (A21[x_pos] * ywts_const[y1 + 1]) + (A22[x_pos] * ywts_const[y1 + 2]); } else if (m >= 4) { dev_C0_tmp[x_pos] = (A00[x_pos] * ywts_const[y1 + 0]) + (A01[x_pos] * ywts_const[y1 + 1]) + (A02[x_pos] * ywts_const[y1 + 2]) + (A03[x_pos] * ywts_const[y1 + 3]); dev_C1_tmp[x_pos] = (A10[x_pos] * ywts_const[y1 + 0]) + (A11[x_pos] * ywts_const[y1 + 1]) + (A12[x_pos] * ywts_const[y1 + 2]) + (A13[x_pos] * ywts_const[y1 + 3]); dev_C2_tmp[x_pos] = (A20[x_pos] * ywts_const[y1 + 0]) + (A21[x_pos] * ywts_const[y1 + 1]) + (A22[x_pos] * ywts_const[y1 + 2]) + (A23[x_pos] * ywts_const[y1 + 3]); } for (int y0 = 4; y0 < m; y0++) { A01 = A00 + (y0 * org_wd); A11 = A10 + (y0 * org_wd); A11 = A10 + (y0 * org_wd); wt1 = ywts_const[y1 + y0]; dev_C0_tmp[x_pos] = dev_C0_tmp[x_pos] + (A01[x_pos] * wt1); 
dev_C1_tmp[x_pos] = dev_C1_tmp[x_pos] + (A11[x_pos] * wt1); dev_C2_tmp[x_pos] = dev_C2_tmp[x_pos] + (A21[x_pos] * wt1); } } else { bool yBd = y_pos < ybd0 || y_pos >= dst_ht - ybd1; if (yBd) { dev_C0_tmp[x_pos] = A00[x_pos]; dev_C1_tmp[x_pos] = A10[x_pos]; dev_C2_tmp[x_pos] = A20[x_pos]; } else { dev_C0_tmp[x_pos] = (A00[x_pos] * wt) + (A01[x_pos] * wt1); dev_C1_tmp[x_pos] = (A10[x_pos] * wt) + (A11[x_pos] * wt1); dev_C2_tmp[x_pos] = (A20[x_pos] * wt) + (A21[x_pos] * wt1); } } } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); // resample along x direction (B -> C) if (x_pos < dst_wd) { if (org_wd > dst_wd) { if (xbd0 == 2) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); } else if (xbd0 == 3) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); } else if (xbd0 == 4) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C0_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C1_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C2_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); } else if (xbd0 > 4) { for(x = 0; x < wn; x++) { B00[xbs_const[x]] += dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[xbs_const[x]] += dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[xbs_const[x]] += dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } else { for (x = 0; x < xbd0; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } for (; x < dst_wd - xbd1; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x] + dev_C0_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x] + dev_C1_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x] + dev_C2_tmp[xas_const[x] + 1] * (r - xwts_const[x]); } for (; x < dst_wd; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } __syncthreads(); } } __global__ void int2lin_resmpl_good_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, 
int *yas_const, int *ybs_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int ya, yb; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + 0; float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + 1; float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + 2; float *B2 = dev_out_img + (2 * dst_ht * dst_wd); if (org_ht == dst_ht && org_wd == dst_wd) { int out_img_idx = y_pos + (dst_wd * x_pos); B0[out_img_idx] = A0[out_img_idx * n_channels]; B1[out_img_idx] = A1[out_img_idx * n_channels]; B2[out_img_idx] = A2[out_img_idx * n_channels]; return; } int y1 = 0; if (org_ht == 2 * dst_ht) { y1 += 2 * y_pos; } else if (org_ht == 3 * dst_ht) { y1 += 3 * y_pos; } else if (org_ht == 4 * dst_ht) { y1 += 4 * y_pos; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd * n_channels); A01 = A00 + (org_wd * n_channels); A02 = A01 + (org_wd * n_channels); A03 = A02 + (org_wd * n_channels); A10 = A1 + (ya * org_wd * n_channels); A11 = A00 + (org_wd * n_channels); A12 = A01 + (org_wd * n_channels); A13 = A02 + (org_wd * n_channels); A20 = A2 + (ya * org_wd * n_channels); A21 = A00 + (org_wd * n_channels); A22 = A01 + (org_wd * n_channels); A23 = A02 + (org_wd * n_channels); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); // resample along y direction if (org_ht == 2 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] + A01[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] + A11[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] + A21[x_pos * n_channels]; } else if (org_ht == 3 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] + A01[x_pos * n_channels] + A02[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] + A11[x_pos * n_channels] + A12[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] + A21[x_pos * n_channels] + A22[x_pos * n_channels]; } else if (org_ht == 4 * dst_ht) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] + A01[x_pos * n_channels] + A02[x_pos * n_channels] + A03[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] + A11[x_pos * n_channels] + A12[x_pos * n_channels] + A13[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] + A21[x_pos * n_channels] + A22[x_pos * n_channels] + A23[x_pos * n_channels]; } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); // resample along x direction (B -> C) if (org_wd == 2 * dst_wd) { B00[x_pos]= (dev_C0_tmp[2 * x_pos] + dev_C0_tmp[(2 * x_pos) + 1]) * (r / 2); B10[x_pos]= (dev_C1_tmp[2 * x_pos] + dev_C1_tmp[(2 * x_pos) + 1]) * (r / 2); B20[x_pos]= (dev_C2_tmp[2 * x_pos] + dev_C2_tmp[(2 * x_pos) + 1]) * (r / 2); } else if (org_wd == 3 * dst_wd) { B00[x_pos] = (dev_C0_tmp[3 * x_pos] + dev_C0_tmp[(3 * x_pos) + 1] + dev_C0_tmp[(3 * x_pos) + 2]) * (r / 3); B10[x_pos] = (dev_C1_tmp[3 * x_pos] + dev_C1_tmp[(3 * x_pos) + 1] + dev_C1_tmp[(3 * x_pos) + 2]) * (r / 3); B20[x_pos] = (dev_C2_tmp[3 * x_pos] + dev_C2_tmp[(3 * x_pos) + 1] + dev_C2_tmp[(3 * x_pos) + 2]) * (r / 3); } else if (org_wd == 4 * dst_wd) { B00[x_pos] = (dev_C0_tmp[4 * x_pos] + dev_C0_tmp[(4 * x_pos) + 1] + dev_C0_tmp[(4 * x_pos) + 2] + dev_C0_tmp[(4 * x_pos) + 3]) * (r / 4); B10[x_pos] = (dev_C1_tmp[4 * x_pos] + dev_C1_tmp[(4 * x_pos) + 1] + dev_C1_tmp[(4 * x_pos) + 
2] + dev_C1_tmp[(4 * x_pos) + 3]) * (r / 4); B20[x_pos] = (dev_C2_tmp[4 * x_pos] + dev_C2_tmp[(4 * x_pos) + 1] + dev_C2_tmp[(4 * x_pos) + 2] + dev_C2_tmp[(4 * x_pos) + 3]) * (r / 4); } __syncthreads(); } } __global__ void int2lin_resmpl_messy_gpu_kernel(float *dev_in_img, float *dev_out_img, float *dev_C0_tmp, float *dev_C1_tmp, float *dev_C2_tmp, int org_wd, int org_ht, int dst_wd, int dst_ht, int n_channels, float r, int hn, int wn, int xbd0, int xbd1, int ybd0, int ybd1, int *xas_const, int *xbs_const, float *xwts_const, int *yas_const, int *ybs_const, float *ywts_const) { unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x); unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y); if ((x_pos < dst_wd) && (y_pos < dst_ht)) { int xa, ya, yb; float wt, wt1; float *A00, *A01, *A02, *A03, *B00; float *A10, *A11, *A12, *A13, *B10; float *A20, *A21, *A22, *A23, *B20; float *A0 = dev_in_img + 0; float *B0 = dev_out_img + (0 * dst_ht * dst_wd); float *A1 = dev_in_img + 1; float *B1 = dev_out_img + (1 * dst_ht * dst_wd); float *A2 = dev_in_img + 2; float *B2 = dev_out_img + (2 * dst_ht * dst_wd); int y1 = 0; if (org_ht > dst_ht) { int m = 1; for (int iter = 0; iter < y_pos; iter++) { while (y1 + m < hn && yb == ybs_const[y1 + m]) m++; y1 += m; } wt = ywts_const[y1]; wt1 = 1 - wt; } else { y1 = y_pos; wt = ywts_const[y1]; wt1 = 1 - wt; } if (y_pos == 0) y1 = 0; ya = yas_const[y1]; A00 = A0 + (ya * org_wd * n_channels); A01 = A00 + (org_wd * n_channels); A02 = A01 + (org_wd * n_channels); A03 = A02 + (org_wd * n_channels); A10 = A1 + (ya * org_wd * n_channels); A11 = A00 + (org_wd * n_channels); A12 = A01 + (org_wd * n_channels); A13 = A02 + (org_wd * n_channels); A20 = A2 + (ya * org_wd * n_channels); A21 = A00 + (org_wd * n_channels); A22 = A01 + (org_wd * n_channels); A23 = A02 + (org_wd * n_channels); yb = ybs_const[y1]; B00 = B0 + (yb * dst_wd); B10 = B1 + (yb * dst_wd); B20 = B2 + (yb * dst_wd); if (x_pos < org_wd) { // resample along y direction if (org_ht > dst_ht) { int m = 1; while ((y1 + m < hn) && (yb == ybs_const[y1 + m])) m++; if (m == 1) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels] * ywts_const[y1]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels] * ywts_const[y1]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels] * ywts_const[y1]; } else if (m == 2) { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * ywts_const[y1 + 0]) + (A01[x_pos * n_channels] * ywts_const[y1 + 1]); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * ywts_const[y1 + 0]) + (A11[x_pos * n_channels] * ywts_const[y1 + 1]); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * ywts_const[y1 + 0]) + (A21[x_pos * n_channels] * ywts_const[y1 + 1]); } else if (m == 3) { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * ywts_const[y1 + 0]) + (A01[x_pos * n_channels] * ywts_const[y1 + 1]) + (A02[x_pos * n_channels] * ywts_const[y1 + 2]); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * ywts_const[y1 + 0]) + (A11[x_pos * n_channels] * ywts_const[y1 + 1]) + (A12[x_pos * n_channels] * ywts_const[y1 + 2]); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * ywts_const[y1 + 0]) + (A21[x_pos * n_channels] * ywts_const[y1 + 1]) + (A22[x_pos * n_channels] * ywts_const[y1 + 2]); } else if (m >= 4) { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * ywts_const[y1 + 0]) + (A01[x_pos * n_channels] * ywts_const[y1 + 1]) + (A02[x_pos * n_channels] * ywts_const[y1 + 2]) + (A03[x_pos * n_channels] * ywts_const[y1 + 3]); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * ywts_const[y1 + 0]) + (A11[x_pos * n_channels] * ywts_const[y1 + 1]) + (A12[x_pos * n_channels] * 
ywts_const[y1 + 2]) + (A13[x_pos * n_channels] * ywts_const[y1 + 3]); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * ywts_const[y1 + 0]) + (A21[x_pos * n_channels] * ywts_const[y1 + 1]) + (A22[x_pos * n_channels] * ywts_const[y1 + 2]) + (A23[x_pos * n_channels] * ywts_const[y1 + 3]); } for (int y0 = 4; y0 < m; y0++) { A01 = A00 + (y0 * org_wd * n_channels); A11 = A10 + (y0 * org_wd * n_channels); A11 = A10 + (y0 * org_wd * n_channels); wt1 = ywts_const[y1 + y0]; dev_C0_tmp[x_pos] = dev_C0_tmp[x_pos] + (A01[x_pos * n_channels] * wt1); dev_C1_tmp[x_pos] = dev_C1_tmp[x_pos] + (A11[x_pos * n_channels] * wt1); dev_C2_tmp[x_pos] = dev_C2_tmp[x_pos] + (A21[x_pos * n_channels] * wt1); } } else { bool yBd = y_pos < ybd0 || y_pos >= dst_ht - ybd1; if (yBd) { dev_C0_tmp[x_pos] = A00[x_pos * n_channels]; dev_C1_tmp[x_pos] = A10[x_pos * n_channels]; dev_C2_tmp[x_pos] = A20[x_pos * n_channels]; } else { dev_C0_tmp[x_pos] = (A00[x_pos * n_channels] * wt) + (A01[x_pos * n_channels] * wt1); dev_C1_tmp[x_pos] = (A10[x_pos * n_channels] * wt) + (A11[x_pos * n_channels] * wt1); dev_C2_tmp[x_pos] = (A20[x_pos * n_channels] * wt) + (A21[x_pos * n_channels] * wt1); } } } /* ensure that all threads have calculated the values for C until this point */ __syncthreads(); if (x_pos < dst_wd) { // resample along x direction (B -> C) if (org_wd > dst_wd) { if (xbd0 == 2) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]); } else if (xbd0 == 3) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]); } else if (xbd0 == 4) { xa = xas_const[x_pos * 4]; B00[x_pos] = (dev_C0_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C0_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C0_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C0_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B10[x_pos] = (dev_C1_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C1_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C1_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C1_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); B20[x_pos] = (dev_C2_tmp[xa + 0] * xwts_const[(4 * x_pos) + 0]) + (dev_C2_tmp[xa + 1] * xwts_const[(4 * x_pos) + 1]) + (dev_C2_tmp[xa + 2] * xwts_const[(4 * x_pos) + 2]) + (dev_C2_tmp[xa + 3] * xwts_const[(4 * x_pos) + 3]); } else if (xbd0 > 4) { for(int x = 0; x < wn; x++) { B00[xbs_const[x]] += dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[xbs_const[x]] += dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[xbs_const[x]] += dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } else { int x = 0; for (x = 0; x < xbd0; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } for (; x < dst_wd - xbd1; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * 
xwts_const[x] + dev_C0_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x] + dev_C1_tmp[xas_const[x] + 1] * (r - xwts_const[x]); B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x] + dev_C2_tmp[xas_const[x] + 1] * (r - xwts_const[x]); } for (; x < dst_wd; x++) { B00[x] = dev_C0_tmp[xas_const[x]] * xwts_const[x]; B10[x] = dev_C1_tmp[xas_const[x]] * xwts_const[x]; B20[x] = dev_C2_tmp[xas_const[x]] * xwts_const[x]; } } } __syncthreads(); } } /***********************************************************************************/ /* GPU Functions to launch CUDA KERNELS */ /* These are basically ported versions of CPU functions as wrappers around kernels */ /***********************************************************************************/ void img_process::rgb2luv_gpu(cv::Mat& in_img, cv::Mat& out_img, float nrm, bool useRGB) { CV_Assert(in_img.type() == CV_32FC3); static int cnt; if (cnt == 0) { rgb2luv_setup_gpu(nrm); } cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; cudaError_t cuda_ret; #if 0 unsigned char *dev_input_img; /* input image is of type 8UC3 (8 bit unsigned 3 channel) */ float *dev_output_luv_img; /* output image is of type 32FC3 (32 bit float 3 channel) */ #endif unsigned int in_img_size_total = in_img.step * in_img.rows; unsigned int out_img_size_total = res_img.step * res_img.rows; if (cnt == 0) { /* Allocate required memory on GPU device for both input and output images */ cuda_ret = cudaMalloc((void **)&dev_input_img, in_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void **)&dev_output_luv_img, out_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cnt++; } /* Copy data from OpenCV input image to device memory */ cuda_ret = cudaMemcpy(dev_input_img, in_img.ptr<unsigned char>(0), in_img_size_total, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy to device memory"); /* Specify a reasonable block size */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); /* Calculate grid size to cover the whole image */ const dim3 dim_grid(ceil(in_img.cols / BLOCK_SIZE), ceil(in_img.rows / BLOCK_SIZE)); convert_to_luv_gpu_kernel<<<dim_grid, dim_block>>>(dev_input_img, dev_output_luv_img, in_img.cols, in_img.rows, useRGB); /* Synchronize to check for any kernel launch errors */ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); /* Copy back data from device memory to OpenCV output image */ cuda_ret = cudaMemcpy(res_img.ptr<float>(0), dev_output_luv_img, out_img_size_total, cudaMemcpyDeviceToHost); if (cuda_ret != cudaSuccess) FATAL("Unable to copy from device memory"); #if 0 /* Free the device memory */ cuda_ret = cudaFree(dev_input_img); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_output_luv_img); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); #endif return; } void img_process::rgb2luv_gpu(cv::Mat& in_img, cv::Mat& out_img) { CV_Assert(in_img.type() == CV_8UC3); float nrm = 1.0f/255; static int cnt; if (cnt == 0) { rgb2luv_setup_gpu(nrm); } cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; cudaError_t cuda_ret; unsigned int in_img_size_total = in_img.step * in_img.rows; unsigned int out_img_size_total = res_img.step * res_img.rows; if (cnt == 0) { /* Allocate required memory on GPU device for both input and output images */ cuda_ret = cudaMalloc((void 
**)&dev_input_img, in_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cuda_ret = cudaMalloc((void **)&dev_output_luv_img, out_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); cnt++; } /* Copy data from OpenCV input image to device memory */ cuda_ret = cudaMemcpy(dev_input_img, in_img.ptr<unsigned char>(0), in_img_size_total, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy to device memory"); /* Specify a reasonable block size */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); /* Calculate grid size to cover the whole image */ const dim3 dim_grid(ceil(in_img.cols / BLOCK_SIZE), ceil(in_img.rows / BLOCK_SIZE)); convert_to_luv_gpu_kernel<<<dim_grid, dim_block>>>(dev_input_img, dev_output_luv_img, in_img.cols, in_img.rows, false); /* Synchronize to check for any kernel launch errors */ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); /* Copy back data from device memory to OpenCV output image */ cuda_ret = cudaMemcpy(res_img.ptr<float>(0), dev_output_luv_img, out_img_size_total, cudaMemcpyDeviceToHost); if (cuda_ret != cudaSuccess) FATAL("Unable to copy from device memory"); return; } void img_process::free_gpu(void) { cudaError_t cuda_ret; /* Free the device memory */ if (dev_input_img) { cuda_ret = cudaFree(dev_input_img); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); dev_input_img = NULL; } if (dev_output_luv_img) { cuda_ret = cudaFree(dev_output_luv_img); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); dev_output_luv_img = NULL; } } void img_process::rgb2luv_setup_gpu(float nrm) { /* set constants for conversion */ const float y0 = ((6.0f / 29) * (6.0f / 29) * (6.0f / 29)); const float a = ((29.0f / 3) * (29.0f / 3) * (29.0f / 3)); mr[0] = 0.430574f * nrm; mr[1] = 0.222015f * nrm; mr[2] = 0.020183f * nrm; mg[0] = 0.341550f * nrm; mg[1] = 0.706655f * nrm; mg[2] = 0.129553f * nrm; mb[0] = 0.178325f * nrm; mb[1] = 0.071330f * nrm; mb[2] = 0.939180f * nrm; cudaError_t cuda_ret; cuda_ret = cudaMemcpyToSymbol(mr_const, mr, sizeof(float) * 3, 0); if (cuda_ret != cudaSuccess) FATAL("Unable to copy to constant memory"); cuda_ret = cudaMemcpyToSymbol(mg_const, mg, sizeof(float) * 3, 0); if (cuda_ret != cudaSuccess) FATAL("Unable to copy to constant memory"); cuda_ret = cudaMemcpyToSymbol(mb_const, mb, sizeof(float) * 3, 0); if (cuda_ret != cudaSuccess) FATAL("Unable to copy to constant memory"); /* build (padded) lookup table for y->l conversion assuming y in [0,1] */ float maxi = 1.0f / 270; float y, l; for (int i = 0; i < 1025; i++) { y = (i / 1024.0); l = y > y0 ? 
116 * pow((double)y, 1.0 / 3.0) - 16 : y * a; lTable[i] = l * maxi; } for(int i = 1025; i < 1064; i++) lTable[i] = lTable[i - 1]; cuda_ret = cudaMemcpyToSymbol(lTable_const, lTable, sizeof(float) * 1064, 0); if (cuda_ret != cudaSuccess) FATAL("Unable to copy to constant memory"); return; } void img_process::imResample_array_int2lin_gpu(float* in_img_gpu, float* out_img, int n_channels, int org_ht, int org_wd, int dst_ht, int dst_wd, float r) { int hn, wn; // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, dst_wd, output --> wn, xas, xbs, xwts, xbd /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if (org_ht == 2 * dst_ht) r /= 2; if (org_ht == 3 * dst_ht) r /= 3; if (org_ht == 4 * dst_ht) r /= 4; r /= float(1 + 1e-6); for (int x = 0; x < wn; x++) xwts[x] *= r; memset(out_img, 0, sizeof(float) * dst_ht * dst_wd * n_channels); cudaError_t cuda_ret; float *dev_out_rsmpl_img, *dev_C_temp0, *dev_C_temp1, *dev_C_temp2; int *xas_const = NULL, *xbs_const = NULL, *yas_const = NULL, *ybs_const = NULL; float *xwts_const = NULL, *ywts_const = NULL; int out_img_size_total = sizeof(float) * dst_ht * dst_wd * n_channels; cuda_ret = cudaMalloc((void **)&dev_C_temp0, sizeof(float) * (org_wd + 4)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_C_temp1, sizeof(float) * (org_wd + 4)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_C_temp2, sizeof(float) * (org_wd + 4)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); /* output image size changes frequently have to allocate each time */ cuda_ret = cudaMalloc((void **)&dev_out_rsmpl_img, out_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&yas_const, sizeof(int) * hn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&ybs_const, sizeof(int) * hn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&ywts_const, sizeof(float) * hn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); /* wait all till malloc finishes */ cuda_ret = cudaDeviceSynchronize(); cuda_ret = cudaMemcpy(yas_const, yas, sizeof(int) * hn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(ybs_const, ybs, sizeof(int) * hn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(ywts_const, ywts, sizeof(float) * hn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); /* choose grid to cover entire output image */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); const dim3 dim_grid(ceil(dst_wd / BLOCK_SIZE), ceil(dst_ht / BLOCK_SIZE)); //cudaStream_t stream[n_channels]; /* Create CUDA streams so that each channel operations can be done simultaneously */ /*for (int iter = 0; iter < n_channels; iter++) { cuda_ret = cudaStreamCreate(&stream[iter]); if(cuda_ret != cudaSuccess) FATAL("Unable to create CUDA streams"); }*/ cuda_ret = cudaDeviceSynchronize(); if ((org_ht == dst_ht) || (org_ht == 2 * dst_ht) || (org_ht == 3 * dst_ht) || (org_ht == 4 * dst_ht)) { //for (int n = 0; n < n_channels; n++) { 
int2lin_resmpl_good_gpu_kernel<<<dim_grid, dim_block>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, yas_const, ybs_const); } /* wait for all streams to finish computing */ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel2"); } else { cuda_ret = cudaMalloc((void **)&xas_const, sizeof(int) * wn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&xbs_const, sizeof(int) * wn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&xwts_const, sizeof(float) * wn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaDeviceSynchronize(); cuda_ret = cudaMemcpy(xas_const, xas, sizeof(int) * wn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(xbs_const, xbs, sizeof(int) * wn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(xwts_const, xwts, sizeof(float) * wn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaDeviceSynchronize(); //for (int n = 0; n < n_channels; n++) { int2lin_resmpl_messy_gpu_kernel<<<dim_grid, dim_block>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], xas_const, xbs_const, xwts_const, yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); } /* wait for all streams to finish computing */ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel2"); cuda_ret = cudaFree(xas_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(xbs_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(xwts_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); } cuda_ret = cudaMemcpy(out_img, dev_out_rsmpl_img, out_img_size_total, cudaMemcpyDeviceToHost); if (cuda_ret != cudaSuccess) FATAL("Unable to copy from device memory"); cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel2"); cuda_ret = cudaFree(dev_out_rsmpl_img); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_C_temp0); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_C_temp1); if (cuda_ret != cudaSuccess) FATAL("Unable to 
free device memory"); cuda_ret = cudaFree(dev_C_temp2); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(yas_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(ybs_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(ywts_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); /*for (int i = 0; i < n_channels; i++) { cuda_ret = cudaStreamDestroy(stream[i]); if(cuda_ret != cudaSuccess) FATAL("Unable to destroy CUDA streams"); }*/ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel2"); delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } void img_process::imResample_array_lin2lin_gpu(float* in_img, float* out_img, int n_channels, int org_ht, int org_wd, int dst_ht, int dst_wd, float r) { int hn, wn; // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, dst_wd, output --> wn, xas, xbs, xwts, xbd /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if (org_ht == 2 * dst_ht) r /= 2; if (org_ht == 3 * dst_ht) r /= 3; if (org_ht == 4 * dst_ht) r /= 4; r /= float(1 + 1e-6); for (int x = 0; x < wn; x++) xwts[x] *= r; memset(out_img, 0, sizeof(float) * dst_ht * dst_wd * n_channels); cudaError_t cuda_ret; float *in_img_temp, *dev_out_rsmpl_img, *dev_C_temp0, *dev_C_temp1, *dev_C_temp2; int *xas_const = NULL, *xbs_const = NULL, *yas_const = NULL, *ybs_const = NULL; float *xwts_const = NULL, *ywts_const = NULL; int in_img_size_total = sizeof(float) * org_ht * org_wd * n_channels; int out_img_size_total = sizeof(float) * dst_ht * dst_wd * n_channels; cuda_ret = cudaMalloc((void **)&dev_C_temp0, sizeof(float) * (org_wd + 4)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_C_temp1, sizeof(float) * (org_wd + 4)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_C_temp2, sizeof(float) * (org_wd + 4)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); /* output image size changes frequently have to allocate each time */ cuda_ret = cudaMalloc((void **)&dev_out_rsmpl_img, out_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); /* input image size changes frequently have to allocate each time */ cuda_ret = cudaMalloc((void **)&in_img_temp, in_img_size_total); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&yas_const, sizeof(int) * hn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&ybs_const, sizeof(int) * hn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&ywts_const, sizeof(float) * hn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); /* wait all till malloc finishes */ cuda_ret = cudaDeviceSynchronize(); //cout << "here2\n"; cuda_ret = cudaMemcpy(in_img_temp, in_img, in_img_size_total, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(yas_const, yas, sizeof(int) * hn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(ybs_const, 
ybs, sizeof(int) * hn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(ywts_const, ywts, sizeof(float) * hn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); /* choose grid to cover entire output image */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); const dim3 dim_grid(ceil(dst_wd / BLOCK_SIZE), ceil(dst_ht / BLOCK_SIZE)); ; /*cudaStream_t stream[n_channels]; /* Create CUDA streams so that each channel operations can be done simultaneously */ /*for (int iter = 0; iter < n_channels; iter++) { cuda_ret = cudaStreamCreate(&stream[iter]); if(cuda_ret != cudaSuccess) FATAL("Unable to create CUDA streams"); }*/ cudaDeviceSynchronize(); if ((org_ht == dst_ht) || (org_ht == 2 * dst_ht) || (org_ht == 3 * dst_ht) || (org_ht == 4 * dst_ht)) { //for (int n = 0; n < n_channels; n++) { lin2lin_resmpl_good_gpu_kernel<<<dim_grid, dim_block>>>(in_img_temp, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, yas_const, ybs_const); //resample_chnl_gpu_kernel1<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, yas_const, ybs_const); } /* wait for all streams to finish computing */ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel3"); } else { cuda_ret = cudaMalloc((void **)&xas_const, sizeof(int) * wn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&xbs_const, sizeof(int) * wn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&xwts_const, sizeof(float) * wn); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cudaDeviceSynchronize(); cuda_ret = cudaMemcpy(xas_const, xas, sizeof(int) * wn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(xbs_const, xbs, sizeof(int) * wn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cuda_ret = cudaMemcpy(xwts_const, xwts, sizeof(float) * wn, cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cudaDeviceSynchronize(); //for (int n = 0; n < n_channels; n++) { lin2lin_resmpl_messy_gpu_kernel<<<dim_grid, dim_block>>>(in_img_temp, dev_out_rsmpl_img, dev_C_temp0, dev_C_temp1, dev_C_temp2, org_wd, org_ht, dst_wd, dst_ht, n_channels, r, hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], xas_const, xbs_const, xwts_const, yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[1]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp1, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 1, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); //resample_chnl_gpu_kernel2<<<dim_grid, dim_block, 0, stream[2]>>>(in_img_gpu, dev_out_rsmpl_img, dev_C_temp2, // org_wd, org_ht, dst_wd, dst_ht, // n_channels, 2, r, // hn, wn, xbd[0], xbd[1], ybd[0], ybd[1], // xas_const, xbs_const, xwts_const, // yas_const, ybs_const, ywts_const); } /* wait for all streams to finish computing */ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable 
to launch kernel4"); cuda_ret = cudaFree(xas_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(xbs_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(xwts_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); } cuda_ret = cudaMemcpy(out_img, dev_out_rsmpl_img, out_img_size_total, cudaMemcpyDeviceToHost); if (cuda_ret != cudaSuccess) FATAL("Unable to copy from device memory"); cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel2"); cuda_ret = cudaFree(dev_out_rsmpl_img); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(in_img_temp); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_C_temp0); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_C_temp1); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_C_temp2); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(yas_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(ybs_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(ywts_const); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); /*for (int i = 0; i < n_channels; i++) { cuda_ret = cudaStreamDestroy(stream[i]); if(cuda_ret != cudaSuccess) FATAL("Unable to destroy CUDA streams"); }*/ cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel2"); delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } void img_process::ConvTri1_gpu(float* I, float* O, int ht, int wd, int dim, float p, int s) { const float nrm = 1.0f / ((p + 2) * (p + 2)); cudaError_t cuda_ret; float *dev_I, *dev_O, *dev_T0, *dev_T1, *dev_T2; cuda_ret = cudaMalloc((void **)&dev_I, sizeof(float) * (ht * wd * dim)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_O, sizeof(float) * (ht * wd * dim)); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_T0, sizeof(float) * ht * wd); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_T1, sizeof(float) * ht * wd); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cuda_ret = cudaMalloc((void **)&dev_T2, sizeof(float) * ht * wd); if (cuda_ret != cudaSuccess) FATAL("Unable to allocate memory"); cudaDeviceSynchronize(); cuda_ret = cudaMemcpy(dev_I, I, sizeof(float) * (ht * wd * dim), cudaMemcpyHostToDevice); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cudaDeviceSynchronize(); /* choose grid to cover entire output image */ const dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE); const dim3 dim_grid(ceil(wd / BLOCK_SIZE), ceil(ht / BLOCK_SIZE)); trianguler_convolution_gpu_kernel<<<dim_grid, dim_block>>>(dev_I, dev_O, dev_T0, dev_T1, dev_T2, wd, ht, nrm, p); cuda_ret = cudaDeviceSynchronize(); if (cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); cuda_ret = cudaMemcpy(O, dev_O, sizeof(float) * (ht * wd * dim), cudaMemcpyDeviceToHost); if (cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cudaDeviceSynchronize(); cuda_ret = cudaFree(dev_I); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_O); if (cuda_ret != 
cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_T0); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_T1); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cuda_ret = cudaFree(dev_T2); if (cuda_ret != cudaSuccess) FATAL("Unable to free device memory"); cudaDeviceSynchronize(); } /******************************************************************************/ /* CPU Functions */ /******************************************************************************/ void img_process::rgb2luv(cv::Mat& in_img, cv::Mat& out_img, float nrm, bool useRGB) { CV_Assert( in_img.type() == CV_32FC3); rgb2luv_setup(nrm); float *R, *G, *B; if(!useRGB) /// default RGB order R = in_img.ptr<float>(0), G = in_img.ptr<float>(0) + 1, B = in_img.ptr<float>(0) + 2; else /// use opencv's built in RGB order: B = in_img.ptr<float>(0), G = in_img.ptr<float>(0) + 1, R = in_img.ptr<float>(0) + 2; cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; int n = in_img.rows * in_img.cols; /// xma opencv order of each channel: /// get l,u,v pointer and r g b pointer float *L=out_img.ptr<float>(0), *U=out_img.ptr<float>(0) + 1, *V=out_img.ptr<float>(0) + 2; for( int i=0; i<n; i++ ) { float r, g, b, x, y, z, l; r=*R; g=*G; b=*B; R += 3; G += 3; B += 3; x = mr[0]*r + mg[0]*g + mb[0]*b; y = mr[1]*r + mg[1]*g + mb[1]*b; z = mr[2]*r + mg[2]*g + mb[2]*b; l = lTable[static_cast<int>((y*1024))]; *(L) = l; z = 1/(x + 15*y + 3*z + (float)1e-35); *(U) = l * (13*4*x*z - 13*un) - minu; *(V) = l * (13*9*y*z - 13*vn) - minv; L += 3; U += 3; V += 3; } return; } void img_process::rgb2luv(cv::Mat& in_img, cv::Mat& out_img) { CV_Assert( in_img.type() == CV_8UC3); float nrm = 1.0f/255; rgb2luv_setup(nrm); unsigned char *B = in_img.ptr<unsigned char>(0), *G = in_img.ptr<unsigned char>(0) + 1, *R = in_img.ptr<unsigned char>(0) + 2; cv::Mat res_img(in_img.rows, in_img.cols, CV_32FC3); out_img = res_img; int n = in_img.rows * in_img.cols; /// xma opencv order of each channel: /// get l,u,v pointer and r g b pointer float *L=out_img.ptr<float>(0), *U=out_img.ptr<float>(0) + 1, *V=out_img.ptr<float>(0) + 2; for( int i=0; i<n; i++ ) { float r, g, b, x, y, z, l; r=static_cast<float>(*R); g=static_cast<float>(*G); b=static_cast<float>(*B); R += 3; G += 3; B += 3; x = mr[0]*r + mg[0]*g + mb[0]*b; y = mr[1]*r + mg[1]*g + mb[1]*b; z = mr[2]*r + mg[2]*g + mb[2]*b; l = lTable[static_cast<int>((y*1024))]; *(L) = l; z = 1/(x + 15*y + 3*z + (float)1e-35); *(U) = l * (13*4*x*z - 13*un) - minu; *(V) = l * (13*9*y*z - 13*vn) - minv; L += 3; U += 3; V += 3; } return; } void img_process::rgb2luv_setup(float nrm) { // set constants for conversion const float y0 = ((6.0f/29)*(6.0f/29)*(6.0f/29)); const float a = ((29.0f/3)*(29.0f/3)*(29.0f/3)); un = 0.197833f; vn = 0.468331f; mr[0]= 0.430574f*nrm; mr[1]= 0.222015f*nrm; mr[2]= 0.020183f*nrm; mg[0]= 0.341550f*nrm; mg[1]= 0.706655f*nrm; mg[2]= 0.129553f*nrm; mb[0]= 0.178325f*nrm; mb[1]= 0.071330f*nrm; mb[2]= 0.939180f*nrm; float maxi= 1.0f/270; minu=-88.0f*maxi; minv=-134.0f*maxi; // build (padded) lookup table for y->l conversion assuming y in [0,1] float y, l; for(int i=0; i<1025; i++) { y = (i/1024.0); l = y>y0 ? 
116*pow((double)y,1.0/3.0)-16 : y*a; lTable[i] = l*maxi; } for(int i=1025; i<1064; i++) lTable[i]=lTable[i-1]; return; } void img_process::resampleCoef( int ha, int hb, int &n, int *&yas, int *&ybs, float *&wts, int bd[2], int pad) { /// xma input: ha, hb, /// xma output: n, yas, ybs, wts, bd,0 /// xma s is the scale factor const float s = static_cast<float>(hb)/static_cast<float>(ha), sInv = 1/s; float wt, wt0=static_cast<float>(1e-3)*s; //cout << "s = " << s << " sInv = " << sInv << " wt0 = " << wt0 << " pad = " << pad << endl; /// determine either downsample or upsample for resampling bool ds=ha>hb; int nMax; bd[0]=bd[1]=0; if(ds) { n=0; nMax=ha+(pad>2 ? pad : 2)*hb; } else { n=nMax=hb; } //cout << "nMax = " << nMax << endl; // initialize memory wts = new float[nMax]; yas = new int[nMax]; ybs = new int[nMax]; if( ds ) { for( int yb=0; yb<hb; yb++ ) { // create coefficients for downsampling float ya0f=yb*sInv, ya1f=ya0f+sInv, W=0; int ya0=int(ceil(ya0f)), ya1=int(ya1f), n1=0; //cout << "ya0f = " << ya0f << ", ya1f = " << ya1f << ", ya0 = << " << ya0 << ", ya1 = " << ya1 << endl; for( int ya=ya0-1; ya<ya1+1; ya++ ) { wt=s; if(ya==ya0-1) wt=(ya0-ya0f)*s; else if(ya==ya1) wt=(ya1f-ya1)*s; /// only when the weight is larger than 10-3, consider it as a valid weight (at the edge). if(wt>wt0 && ya>=0) { ybs[n]=yb; yas[n]=ya; wts[n]=wt; n++; n1++; W+=wt; } } if(W>1) for( int i=0; i<n1; i++ ) wts[n-n1+i]/=W; if(n1>bd[0]) bd[0]=n1; while( n1<pad ) { ybs[n]=yb; yas[n]=yas[n-1]; wts[n]=0; n++; n1++; } } } else { for( int yb=0; yb<hb; yb++ ) { // create coefficients for upsampling float yaf = (float(.5)+yb)*sInv-float(.5); int ya=(int) floor(yaf); wt=1; if(ya>=0 && ya<ha-1) wt=1-(yaf-ya); if(ya<0) { ya=0; bd[0]++; } if(ya>=ha-1) { ya=ha-1; bd[1]++; } ybs[yb]=yb; yas[yb]=ya; wts[yb]=wt; } } /* //cout << left << setw(15) << "wts " << left << setw(15) << "yas " << left << setw(15) << "ybs" << endl; for(int idx = 0; idx < nMax; ++idx) //cout << left << setw(15) << wts[idx] << left << setw(15) << yas[idx] << left << setw(15) << ybs[idx] << endl; //cout << "n = " << n << " bd[0] = " << bd[0] << " bd[1] = " << bd[1] << endl << endl << endl << endl; */ } /// bilinear interpolation methods to resize image (opencv mat version, no SSE, interleaved to interleaved memory) void img_process::imResample(cv::Mat& in_img, cv::Mat& out_img, int dheight, int dwidth, float r ) { cv::Mat img_resample = cv::Mat::zeros(dheight, dwidth, in_img.type()); int d = 1; if(in_img.type() == CV_32FC1) d = 1; else if(in_img.type() == CV_32FC2) d = 2; else if(in_img.type() == CV_32FC3) d = 3; else CV_Assert(0); int org_ht = in_img.rows, org_wd = in_img.cols, dst_ht = dheight, dst_wd = dwidth; out_img = img_resample; int hn, wn, x, /*x1,*/ y, z, xa, /*xb,*/ ya, yb, y1 /* xma added to convert from col major to row major*/; float *A0, *A1, *A2, *A3, *B0, wt, wt1; /// xma prepare 128-bit aligned array C of org height+4 and set boundary values to 0 float *C = new float[org_wd+4]; for(x=org_wd; x<org_wd+4; x++) C[x]=0; //bool sse = (typeid(T)==typeid(float)) && !(size_t(A)&15) && !(size_t(B)&15); // sse = false // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, org_wd, output wn, xas, xbs, xwts, xbd,0 /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if( org_ht==2*dst_ht ) r/=2; if( org_ht==3*dst_ht ) r/=3; if( org_ht==4*dst_ht 
) r/=4; r/=float(1+1e-6); for( x=0; x<wn; x++ ) { xwts[x] *= r; //cout << "xwts[" << x << "] = " << xwts[x] << endl; } // resample each color channel separately) for( z=0; z<d; z++ ) { float *A = in_img.ptr<float>(0) + z; float *B = img_resample.ptr<float>(0) + z; for( y=0; y<dst_ht; y++) { if(y==0) y1=0; ya=yas[y1]; yb=ybs[y1]; wt=ywts[y1]; wt1=1-wt; x=0; /// xma four points in org img for bilinear interpolation /// xma z*org_ht*org_wd is color channel offset, A0=A+ya*org_wd*d; // point to current row based on ya, (memory channel is interleaved, so each row takes org_wd*d spaces) /// bilinear interpolation, each direction, need to use 4 points to estimate the final value A1=A0+org_wd*d ; A2=A1+org_wd*d ; A3=A2+org_wd*d ; /// compute the pointer to the resampled image, current scale(for interleaved color channel) B0=B+yb*dst_wd*d; //cout << "ya = " << ya << " yb = " << yb << " wt = " << wt << " wt1 = " << wt1 << endl; //cout << "A0 = " << *A0 << " A1 = " << *A1 << " A2 = " << *A2 << " A3 = " << *A3 << endl; // resample along y direction if( org_ht==2*dst_ht ) { //cout << "testing scale height by 1/2." << endl; for(; x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d]; } y1 += 2; } else if( org_ht==3*dst_ht ) { //cout << "testing scale height by 1/3." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d]; } y1+=3; } else if( org_ht==4*dst_ht ) { //cout << "testing scale height by 1/4." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d] + A3[x*d]; } y1+=4; } else if( org_ht > dst_ht ) { //cout << "testing scale height by any other number." << endl; int m=1; while( y1+m<hn && yb==ybs[y1+m] ) m++; //cout << "hn = " << hn << " y1 = " << y1 << " m = " << m << endl; if(m==1) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1]; } } if(m==2) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1]; } } if(m==3) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2]; } } if(m>=4) { for(; x < org_wd;++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2] + A3[x*d] * ywts[y1+3]; } } for( int y0=4; y0<m; y0++ ) { A1=A0+y0*org_wd*d; wt1=ywts[y1+y0]; x=0; for(; x < org_wd; ++x) { C[x] = C[x] + A1[x*d]*wt1; } } y1+=m; } else { //cout << "testing scale height up " << endl; bool yBd = y < ybd[0] || y>=dst_ht-ybd[1]; y1++; //cout << "yBd = " << yBd << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << " y1 = " << y1 << endl; if(yBd) for(int tempx = 0; tempx < org_wd; ++tempx) C[tempx] = A0[tempx*d]; else { for(int tempx = 0; tempx < org_wd; ++tempx) { C[tempx] = A0[tempx*d]*wt + A1[tempx*d]*wt1; } } } // resample along x direction (B -> C) if( org_wd==dst_wd*2 ) { //cout << "testing scale width by 1/2." << endl; float r2 = r/2; for(x=0 ; x < dst_wd; x++ ) B0[x*d]=(C[2*x]+C[2*x+1])*r2; } else if( org_wd==dst_wd*3 ) { //cout << "testing scale width by 1/3." << endl; for(x=0; x<dst_wd; x++) B0[x*d]=(C[3*x]+C[3*x+1]+C[3*x+2])*(r/3); } else if( org_wd==dst_wd*4 ) { //cout << "testing scale width by 1/4." << endl; for(x=0; x<dst_wd; x++) B0[x*d]=(C[4*x]+C[4*x+1]+C[4*x+2]+C[4*x+3])*(r/4); } else if( org_wd>dst_wd ) { //cout << "testing scale width by any number." 
<< endl; //cout << "xbd[0] = " << xbd[0] << endl; x=0; //#define U(o) C[xa+o]*xwts[x*4+o] if(xbd[0]==2) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x*d] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1];// U(0)+U(1); } if(xbd[0]==3) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x*d] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2];//U(0)+U(1)+U(2); } if(xbd[0]==4) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x*d] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2] + C[xa+3]*xwts[x*4+3];//U(0)+U(1)+U(2)+U(3); } if(xbd[0]>4) for(; x<wn; x++) { B0[xbs[x]*d] += C[xas[x]] * xwts[x]; } } else { //cout << "testing scale width up!" << endl; for(x=0; x<xbd[0]; x++) B0[x*d] = C[xas[x]]*xwts[x]; for(; x<dst_wd-xbd[1]; x++) B0[x*d] = C[xas[x]]*xwts[x]+C[xas[x]+1]*(r-xwts[x]); for(; x<dst_wd; x++) B0[x*d] = C[xas[x]]*xwts[x]; } } } delete[] C; delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } /// bilinear interpolation methods to resize image (array version, no SSE) /// note that for the input array, the different color channels are interleaved, but for the output array, the memory channels are separated void img_process::imResample_array_int2lin(float* in_img, float* out_img, int d, int org_ht, int org_wd, int dst_ht, int dst_wd, float r ) { int hn, wn, x, /*x1,*/ y, z, xa, /*xb,*/ ya, yb, y1 /* xma added to convert from col major to row major*/; float *A0, *A1, *A2, *A3, *B0, wt, wt1; /// xma prepare 128-bit aligned array C of org height+4 and set boundary values to 0 float *C = new float[org_wd+4]; for(x=org_wd; x<org_wd+4; x++) C[x]=0; //bool sse = (typeid(T)==typeid(float)) && !(size_t(A)&15) && !(size_t(B)&15); // sse = false // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, org_wd, output wn, xas, xbs, xwts, xbd,0 /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); if( org_ht==2*dst_ht ) r/=2; if( org_ht==3*dst_ht ) r/=3; if( org_ht==4*dst_ht ) r/=4; r/=float(1+1e-6); for( x=0; x<wn; x++ ) { xwts[x] *= r; //cout << "xwts[" << x << "] = " << xwts[x] << endl; } /// check if only re-assemble the pixel values: if(org_ht == dst_ht && org_wd == dst_wd) { for(int chn = 0; chn < d; ++chn) for(int idx = chn; idx < org_ht*org_wd*d; idx += d) { out_img[0] = in_img[idx]; out_img++; } return; } memset(out_img, 0, sizeof(float)*dst_ht*dst_wd*d); // resample each color channel separately) for( z=0; z<d; z++ ) { float *A = in_img + z; float *B = out_img + z * dst_ht * dst_wd; //cout << "z = " << z << endl; for( y=0; y<dst_ht; y++) { if(y==0) y1=0; ya=yas[y1]; yb=ybs[y1]; wt=ywts[y1]; wt1=1-wt; x=0; /// xma four points in org img for bilinear interpolation /// xma z*org_ht*org_wd is color channel offset, A0=A+ya*org_wd*d; // point to current row based on ya, (memory channel is interleaved, so each row takes org_wd*d spaces) /// bilinear interpolation, each direction, need to use 4 points to estimate the final value A1=A0+org_wd*d ; A2=A1+org_wd*d ; A3=A2+org_wd*d ; /// compute the pointer to the resampled image, current scale(for interleaved color channel) B0=B+yb*dst_wd; //cout << "ya = " << ya << " yb = " << yb << " wt = " << wt << " wt1 = " << wt1 << endl; //cout << "A0 = " << *A0 << " A1 = " << *A1 << " A2 = " << *A2 << " A3 = " << *A3 << endl; // resample along y direction if( org_ht==2*dst_ht ) { //cout << "testing scale height by 1/2." 
<< endl; for(; x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d]; } y1 += 2; } else if( org_ht==3*dst_ht ) { //cout << "testing scale height by 1/3." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d]; } y1+=3; } else if( org_ht==4*dst_ht ) { //cout << "testing scale height by 1/4." << endl; for(;x < org_wd; ++x) { C[x] = A0[x*d] + A1[x*d] + A2[x*d] + A3[x*d]; } y1+=4; } else if( org_ht > dst_ht ) { //cout << "testing scale height by any other number." << endl; int m=1; while( y1+m<hn && yb==ybs[y1+m] ) m++; //cout << "hn = " << hn << " y1 = " << y1 << " m = " << m << endl; if(m==1) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1]; } } if(m==2) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1]; } } if(m==3) { for(;x < org_wd; ++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2]; } } if(m>=4) { for(; x < org_wd;++x) { C[x] = A0[x*d] * ywts[y1] + A1[x*d] * ywts[y1+1] + A2[x*d] * ywts[y1+2] + A3[x*d] * ywts[y1+3]; } } for( int y0=4; y0<m; y0++ ) { A1=A0+y0*org_wd*d; wt1=ywts[y1+y0]; x=0; for(; x < org_wd; ++x) { C[x] = C[x] + A1[x*d]*wt1; } } y1+=m; } else { //cout << "testing scale height up " << endl; bool yBd = y < ybd[0] || y>=dst_ht-ybd[1]; y1++; //cout << "yBd = " << yBd << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << " y1 = " << y1 << endl; if(yBd) for(int tempx = 0; tempx < org_wd; ++tempx) C[tempx] = A0[tempx*d]; else { for(int tempx = 0; tempx < org_wd; ++tempx) { C[tempx] = A0[tempx*d]*wt + A1[tempx*d]*wt1; } } } // resample along x direction (B -> C) if( org_wd==dst_wd*2 ) { //cout << "testing scale width by 1/2." << endl; float r2 = r/2; for(x=0 ; x < dst_wd; x++ ) B0[x]=(C[2*x]+C[2*x+1])*r2; } else if( org_wd==dst_wd*3 ) { //cout << "testing scale width by 1/3." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[3*x]+C[3*x+1]+C[3*x+2])*(r/3); } else if( org_wd==dst_wd*4 ) { //cout << "testing scale width by 1/4." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[4*x]+C[4*x+1]+C[4*x+2]+C[4*x+3])*(r/4); } else if( org_wd>dst_wd ) { //cout << "testing scale width by any number." << endl; //cout << "xbd[0] = " << xbd[0] << endl; x=0; //#define U(o) C[xa+o]*xwts[x*4+o] if(xbd[0]==2) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1];// U(0)+U(1); } if(xbd[0]==3) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2];//U(0)+U(1)+U(2); } if(xbd[0]==4) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2] + C[xa+3]*xwts[x*4+3];//U(0)+U(1)+U(2)+U(3); } if(xbd[0]>4) for(; x<wn; x++) { B0[xbs[x]] += C[xas[x]] * xwts[x]; } } else { //cout << "testing scale width up!" 
<< endl; for(x=0; x<xbd[0]; x++) B0[x] = C[xas[x]]*xwts[x]; for(; x<dst_wd-xbd[1]; x++) B0[x] = C[xas[x]]*xwts[x]+C[xas[x]+1]*(r-xwts[x]); for(; x<dst_wd; x++) B0[x] = C[xas[x]]*xwts[x]; } } } delete[] C; delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } /// bilinear interpolation methods to resize image (array version, no SSE) /// note that for the input array, the different color channels are separated, linearly sotred in memory,same for the output array void img_process::imResample_array_lin2lin(float* in_img, float* out_img, int d, int org_ht, int org_wd, int dst_ht, int dst_wd, float r ) { int hn, wn, x, /*x1,*/ y, z, xa, /*xb,*/ ya, yb, y1 /* xma added to convert from col major to row major*/; float *A0, *A1, *A2, *A3, *B0, wt, wt1; float *C = new float[org_wd+4]; for(x=org_wd; x<org_wd+4; x++) C[x]=0; //bool sse = (typeid(T)==typeid(float)) && !(size_t(A)&15) && !(size_t(B)&15); // sse = false // get coefficients for resampling along w and h int *xas, *xbs, *yas, *ybs; float *xwts, *ywts; int xbd[2], ybd[2]; /// xma resampleCoef input is only org_wd, org_wd, output wn, xas, xbs, xwts, xbd,0 /// vertical coef resampleCoef( org_wd, dst_wd, wn, xas, xbs, xwts, xbd, 4 ); /// horizontal coef resampleCoef( org_ht, dst_ht, hn, yas, ybs, ywts, ybd, 0 ); //cout << "org_wd = " << org_wd << " dst_wd = " << dst_wd << " wn = " << wn << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << endl; //cout << "org_ht = " << org_ht << " dst_ht = " << dst_ht << " hn = " << hn << " xbd[0] = " << xbd[0] << " xbd[1] = " << xbd[1] << endl; if( org_ht==2*dst_ht ) r/=2; if( org_ht==3*dst_ht ) r/=3; if( org_ht==4*dst_ht ) r/=4; r/=float(1+1e-6); for( x=0; x<wn; x++ ) { xwts[x] *= r; //cout << "xwts[" << x << "] = " << xwts[x] << endl; } //cout << "r = " << r << endl; memset(out_img, 0, sizeof(float)*dst_ht*dst_wd*d); // resample each color channel separately) for( z=0; z<d; z++ ) { float *A = in_img + z * org_ht * org_wd; float *B = out_img + z * dst_ht * dst_wd; //cout << "z = " << z << endl; for( y=0; y<dst_ht; y++) { if(y==0) y1=0; ya=yas[y1]; yb=ybs[y1]; wt=ywts[y1]; wt1=1-wt; x=0; /// xma four points in org img for bilinear interpolation /// xma z*org_ht*org_wd is color channel offset, A0=A+ya*org_wd; // point to current row based on ya, (memory channel is linear, so each row is org_wd ) /// bilinear interpolation, each direction, need to use 4 points to estimate the final value A1=A0+org_wd ; A2=A1+org_wd ; A3=A2+org_wd ; /// compute the pointer to the resampled image, current scale(for interleaved color channel) B0=B+yb*dst_wd; //cout << "ya = " << ya << " yb = " << yb << " wt = " << wt << " wt1 = " << wt1 << endl; //cout << "A0 = " << *A0 << " A1 = " << *A1 << " A2 = " << *A2 << " A3 = " << *A3 << endl; // resample along y direction if( org_ht==2*dst_ht ) { //cout << "testing scale height by 1/2." << endl; for(; x < org_wd; ++x) { C[x] = A0[x] + A1[x]; } y1 += 2; } else if( org_ht==3*dst_ht ) { //cout << "testing scale height by 1/3." << endl; for(;x < org_wd; ++x) { C[x] = A0[x] + A1[x] + A2[x]; } y1+=3; } else if( org_ht==4*dst_ht ) { //cout << "testing scale height by 1/4." << endl; for(;x < org_wd; ++x) { C[x] = A0[x] + A1[x] + A2[x] + A3[x]; } y1+=4; } else if( org_ht > dst_ht ) { //cout << "testing scale height by any other number." 
<< endl; int m=1; while( y1+m<hn && yb==ybs[y1+m] ) m++; //cout << "hn = " << hn << " y1 = " << y1 << " m = " << m << endl; if(m==1) { for(;x < org_wd; ++x) { C[x] = A0[x] * ywts[y1]; } } if(m==2) { for(;x < org_wd; ++x) { C[x] = A0[x] * ywts[y1] + A1[x] * ywts[y1+1]; } } if(m==3) { for(;x < org_wd; ++x) { C[x] = A0[x] * ywts[y1] + A1[x] * ywts[y1+1] + A2[x] * ywts[y1+2]; } } if(m>=4) { for(; x < org_wd;++x) { C[x] = A0[x] * ywts[y1] + A1[x] * ywts[y1+1] + A2[x] * ywts[y1+2] + A3[x] * ywts[y1+3]; } } for( int y0=4; y0<m; y0++ ) { A1=A0+y0*org_wd; wt1=ywts[y1+y0]; x=0; for(; x < org_wd; ++x) { C[x] = C[x] + A1[x]*wt1; } } y1+=m; } else { //cout << "testing scale height up " << endl; bool yBd = y < ybd[0] || y>=dst_ht-ybd[1]; y1++; //cout << "yBd = " << yBd << " ybd[0] = " << ybd[0] << " ybd[1] = " << ybd[1] << " y1 = " << y1 << endl; if(yBd) for(int tempx = 0; tempx < org_wd; ++tempx) C[tempx] = A0[tempx]; else { for(int tempx = 0; tempx < org_wd; ++tempx) { C[tempx] = A0[tempx]*wt + A1[tempx]*wt1; } } } // resample along x direction (B -> C) if( org_wd==dst_wd*2 ) { //cout << "testing scale width by 1/2." << endl; float r2 = r/2; for(x=0 ; x < dst_wd; x++ ) B0[x]=(C[2*x]+C[2*x+1])*r2; } else if( org_wd==dst_wd*3 ) { //cout << "testing scale width by 1/3." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[3*x]+C[3*x+1]+C[3*x+2])*(r/3); } else if( org_wd==dst_wd*4 ) { //cout << "testing scale width by 1/4." << endl; for(x=0; x<dst_wd; x++) B0[x]=(C[4*x]+C[4*x+1]+C[4*x+2]+C[4*x+3])*(r/4); } else if( org_wd>dst_wd ) { //cout << "testing scale width by any number." << endl; //cout << "xbd[0] = " << xbd[0] << endl; x=0; //#define U(o) C[xa+o]*xwts[x*4+o] if(xbd[0]==2) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1];// U(0)+U(1); } if(xbd[0]==3) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2];//U(0)+U(1)+U(2); } if(xbd[0]==4) for(; x<dst_wd; x++) { xa=xas[x*4]; B0[x] = C[xa]*xwts[x*4] + C[xa+1]*xwts[x*4+1] + C[xa+2]*xwts[x*4+2] + C[xa+3]*xwts[x*4+3];//U(0)+U(1)+U(2)+U(3); } if(xbd[0]>4) for(; x<wn; x++) { B0[xbs[x]] += C[xas[x]] * xwts[x]; } } else { //cout << "testing scale width up!" 
<< endl; for(x=0; x<xbd[0]; x++) B0[x] = C[xas[x]]*xwts[x]; for(; x<dst_wd-xbd[1]; x++) B0[x] = C[xas[x]]*xwts[x]+C[xas[x]+1]*(r-xwts[x]); for(; x<dst_wd; x++) B0[x] = C[xas[x]]*xwts[x]; } } } delete[] C; delete[] xas; delete[] xbs; delete[] xwts; delete[] yas; delete[] ybs; delete[] ywts; return; } void img_process::ConvTri1(float* I, float* O, int ht, int wd, int dim, float p, int s) { const float nrm = 1.0f/((p+2)*(p+2)); float *It, *Im, *Ib, *T= new float[wd]; /// perform convTri dimension by dimension for( int d0=0; d0<dim; d0++ ) { for(int y=s/2; y<ht; y+= s ) /// this is equivalent to i = 0 to ht { /// point It to the current dim and row It= Im = Ib = I+ y*wd+d0*ht*wd; if(y>0) /// not the first row, let It point to previous row It-=wd; if(y < ht-1) /// not the last row, let Ib point to next row Ib+=wd; for(int x=0; x< wd; ++x ) T[x]=nrm*(It[x]+p*Im[x]+Ib[x]); ConvTri1X(T,O,wd,p,s); O += wd/s; /// point to next row } } } void img_process::ConvTri1X(float* I, float* O, int wd, float p, int s) { int j = 0; O[j]=(1+p)*I[j]+I[j+1]; ++j; for(; j < wd - 1; ++j ) O[j]=I[j-1]+p*I[j]+I[j+1]; O[j]=I[j-1]+(1+p)*I[j]; } /// copy the opencv mat files to array with interleaved color channels void img_process::get_pix_all_scales_int(cv::Mat& img, const vector<cv::Size>& scales, float* pix_array) { #ifdef __OUTPUT_PIX__ ofstream pix_out; pix_out.open("pix_out_int.txt",ios::out); #endif for(vector<cv::Size>::const_iterator ii = scales.begin(); ii != scales.end(); ++ii) { cv::Mat img_small; unsigned height = static_cast<unsigned>(ii->height); unsigned width = static_cast<unsigned>(ii->width); if(height == static_cast<unsigned>(img.rows) && width == static_cast<unsigned>(img.cols)) img_small = img; else imResample(img, img_small, height,width, 1.0f); //cout << "Currently at scale " << ii - scales.begin() << ", height = " << img_small.rows << " width = " << img_small.cols << ", number of channels = " << img_small.channels() << endl; float* mat_ptr = img_small.ptr<float>(0); unsigned array_sz = width*height*img_small.channels(); memcpy(pix_array, mat_ptr, array_sz*sizeof(float)); #ifdef __OUTPUT_PIX__ for(int i = 0; i < img_small.channels(); ++i) for(unsigned j = i; j < array_sz; j+= img_small.channels()) pix_out << pix_array[j] << " "; pix_out << endl << endl; #endif pix_array += array_sz; } #ifdef __OUTPUT_PIX__ pix_out.close(); #endif return; } /// copy opencv mat files to array with linear ordered color channels (each channel is stored separatly in memory) /// this process is slightly slower than the interleaved memroy access (not able to use memcpy) void img_process::get_pix_all_scales_lin(cv::Mat& img, const vector<cv::Size>& scales, float* pix_array) { #ifdef __OUTPUT_PIX__ ofstream pix_out; pix_out.open("pix_out_lin.txt",ios::out); #endif int arr_sz = static_cast<unsigned>(scales[0].height) * static_cast<unsigned>(scales[0].width) * img.channels(); float* img_small = new float[arr_sz]; float* mat_ptr = img.ptr<float>(0); for(vector<cv::Size>::const_iterator ii = scales.begin(); ii != scales.end(); ++ii) { //cout << "Currently at scale # " << ii-scales.begin() << endl; int height = static_cast<int>(ii->height); int width = static_cast<int>(ii->width); int array_sz = width*height*img.channels(); imResample_array_int2lin(mat_ptr, img_small, img.channels(), img.rows, img.cols, height, width, 1.0f); memcpy(pix_array, img_small, array_sz*sizeof(float)); #ifdef __OUTPUT_PIX__ for(int i = 0; i < array_sz; ++i) { pix_out << pix_array[i] << " "; } pix_out << endl << endl; #endif pix_array += array_sz; } 
#ifdef __OUTPUT_PIX__ pix_out.close(); #endif delete[] img_small; return; }
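A minimal standalone C++ sketch of the [1, p, 1] row recurrence that ConvTri1X above applies (stride s = 1); this is an illustration only, not part of the img_process API, and the test values and function name are made up for the demo:

#include <cstdio>

// Standalone illustration of the [1, p, 1] row pass used by ConvTri1X
// (stride s = 1); edge samples are weighted (1 + p) to emulate clamping.
static void conv_tri1_row(const float* I, float* O, int wd, float p) {
    O[0] = (1 + p) * I[0] + I[1];                 // left border
    for (int j = 1; j < wd - 1; ++j)
        O[j] = I[j - 1] + p * I[j] + I[j + 1];    // interior
    O[wd - 1] = I[wd - 2] + (1 + p) * I[wd - 1];  // right border
}

int main() {
    const float p = 1.0f;                // example filter parameter
    float in[6] = {0, 0, 9, 9, 0, 0};
    float out[6];
    conv_tri1_row(in, out, 6, p);
    // For this 1-D demo the filter weights sum to (p + 2), so divide once;
    // the 2-D ConvTri1 divides by (p + 2)^2 because it runs the same pass
    // along both rows and columns.
    for (int j = 0; j < 6; ++j) printf("%.2f ", out[j] / (p + 2));
    printf("\n");                        // expected: 0.00 3.00 6.00 6.00 3.00 0.00
    return 0;
}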
51b38ba29437cc6eb61cf2f4b25be797ff1014fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/AccumulateType.h> #include <ATen/NamedTensorUtils.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHAtomics.cuh> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor64<scalar_t, 4> output, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; inputData += slice * itime * iheight * iwidth; scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { int index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || THCNumerics<scalar_t>::isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); AT_CUDA_CHECK(hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInputData, gradOutput.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); AT_CUDA_CHECK(hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("max_pool3d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? 
dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_out_frame", [&] { scalar_t *input_data = work_input.data_ptr<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); }); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? 
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for gradOutput"); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, oheight, owidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); }); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
51b38ba29437cc6eb61cf2f4b25be797ff1014fa.cu
#include <ATen/AccumulateType.h> #include <ATen/NamedTensorUtils.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCAtomics.cuh> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor64<scalar_t, 4> output, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; inputData += slice * itime * iheight * iwidth; scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { int index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || THCNumerics<scalar_t>::isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); max_pool3d_with_indices_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); AT_CUDA_CHECK(cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); max_pool3d_with_indices_backward_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInputData, gradOutput.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); AT_CUDA_CHECK(cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("max_pool3d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_out_frame", [&] { scalar_t *input_data = work_input.data_ptr<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); }); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? 
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for gradOutput"); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth); Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, oheight, owidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); }); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
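Both launch helpers above re-launch their kernel in a while loop, capping the grid's z-extent at 65535 and carrying an offsetZ, because CUDA limits gridDim.z to 65535 blocks while totalZ (= otime * nslices * nbatch) can exceed that. A minimal standalone CUDA sketch of that chunking pattern follows; the kernel, names, and sizes are illustrative, not the ATen code:

#include <cstdio>

// One block per output slice/frame; the slice index is recovered from
// blockIdx.z + offsetZ, mirroring how the pooling kernels above do it.
__global__ void touch_slice(int* counts, int totalZ, int offsetZ) {
    int z = blockIdx.z + offsetZ;
    if (z < totalZ && threadIdx.x == 0 && threadIdx.y == 0)
        atomicAdd(&counts[z], 1);
}

int main() {
    const int totalZ = 150000;                  // more slices than one grid can address
    int* counts;
    cudaMalloc(&counts, totalZ * sizeof(int));
    cudaMemset(counts, 0, totalZ * sizeof(int));

    int remaining = totalZ, offsetZ = 0;
    while (remaining > 0) {
        dim3 block(32, 8);
        dim3 grid(1, 1, remaining > 65535 ? 65535 : remaining);  // gridDim.z capped at 65535
        touch_slice<<<grid, block>>>(counts, totalZ, offsetZ);
        remaining -= 65535;
        offsetZ += 65535;
    }
    cudaDeviceSynchronize();
    printf("covered %d slices in %d launches\n", totalZ, (totalZ + 65534) / 65535);
    cudaFree(counts);
    return 0;
}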
06fb5ca9d89d91bdcefd5f652efa39ea8a4b3825.hip
// !!! This is a file automatically generated by hipify!!! /* only works on DIRECTED GRAPH */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #define MAX_NODE 1000000 #define DEBUG 1 __device__ volatile int Cx[MAX_NODE]; __device__ volatile int PQ[MAX_NODE]; //K in parallel __global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<K && PQ_size[id]>0){ //extract min from PQ int front = id* ( (N+K-1)/K ); int node = PQ[front]; // printf("extract min %d %d\n",id,node); // restructure the heap PQ[front]=PQ[front+PQ_size[id]-1]; PQ_size[id]-=1; int pqIndex = 0; while(2*pqIndex+1 < PQ_size[id]){ if(2*pqIndex+2 >= PQ_size[id]){ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else break; } else{ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ int swap = PQ[front + 2*pqIndex+2]; PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+2; } else{ break; } } } //removed from openList openList[node] = -1; //added to expand next int len = atomicAdd(expandNodes_size,1); expandNodes[len]=node; } } //for K in parallel __global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent, int* expandNodes,int* expandNodes_size, int* lock ,int* flagEnd,int* openList, int N,int E, int K,int dest,int* nVFlag,int* PQ_size, int flagDiff,int* diff_off,int* diff_edge,int* diff_weight ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id< *expandNodes_size ){ int node = expandNodes[id]; //printf("%d %d\n",id,node); //reach dest if(node == dest){ *flagEnd = 1; //printf("found %d\n",id); return; } // expand int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start<end){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } //printf("%d$ before while\n",id); //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock while(atomicCAS(&lock[child],0,1)!=0){ } //printf("%d$%d: %d ,%d\n",node,child,Cx[child],lock[child]); //update cost value if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(DEBUG) printf("exp: %d %d\n",node,child); if(openList[child]>=0){ //update operating on one thread if(DEBUG) printf("upd: %d %d\n",node,child); int Kq = openList[child]; int front = Kq*( (N+K-1)/K ); int index = -1; for(int i=front;i<front+PQ_size[Kq];i++){ if(PQ[i]==child){ index = i; } } if(index > 0){ int i = index; while(i > front){ if( Cx[PQ[(i-1)/2]] > Cx[PQ[i]] ){ int swap = PQ[i]; PQ[i] = PQ[(i-1)/2]; PQ[(i-1)/2] = swap; i = (i-1)/2; } else break; } } __threadfence(); }else{ nVFlag[child]=1; //add only once } } //unlock atomicCAS(&lock[child],1,0); start++; } if(DEBUG) printf("%d outside while\n",id); if(flagDiff){ // printf("something\n"); start = diff_off[node]; end = E; if(node!=N-1) end = diff_off[node+1]; while(start<end){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; 
continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock while(atomicCAS(&lock[child],0,1)!=0){ } printf("%d$%d: %d ,%d\n",node,child,Cx[child],lock[child]); //update cost value if( Cx[child] > (Cx[node] - Hx[node])+ diff_weight[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ diff_weight[start] + Hx[child]; __threadfence(); parent[child] = node; // printf("%d-%d: %d ,%d\n",node,child,Cx[child],lock[child]); if(openList[child]>=0){ //update operating on one thread int Kq = openList[child]; int front = Kq*( (N+K-1)/K ); int index = -1; for(int i=front;i<front+PQ_size[Kq];i++){ if(PQ[i]==child){ index = i; } } if(index > 0){ int i = index; while(i > front){ if( Cx[PQ[(i-1)/2]] > Cx[PQ[i]] ){ int swap = PQ[i]; PQ[i] = PQ[(i-1)/2]; PQ[(i-1)/2] = swap; i = (i-1)/2; } else break; } } __threadfence(); }else{ nVFlag[child]=1; } } //unlock atomicCAS(&lock[child],1,0); start++; } } } } //N threads __global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ //printf("2: %d %d\n",id,nextFlag[id]); if(nextFlag[id]==1){ int index = atomicAdd(nvSize,1); nextV[index]=id; // printf("2: %d\n",id); } } } //for K in parallel __global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K){ // printf("id: %d\n",id); int front = id*( (N+K-1)/K ); int i = id; // printf("s: %d %d\n",*nVsize,PQS[id]); while(i<*nVsize){ PQ[front+PQS[id]]= nextV[i]; PQS[id]+=1; //add in openList openList[nextV[i]] = id; //printf("insert: %d, %d\n",nextV[i],PQS[id]); if(PQS[id]>1){ int index = PQS[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } i += K; } } } __global__ void printCX(int dest){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id==0){ printf("cost: %d\n",Cx[dest]); } } int main(){ //the K PQ int K ; scanf("%d\n",&K); int startNode,endNode; scanf("%d %d",&startNode,&endNode); FILE* fgraph = fopen("graph.txt","r"); int N,E; fscanf(fgraph,"%d %d\n",&N,&E); int* H_offset = (int*)malloc(sizeof(int)*N); int* H_edges = (int*)malloc(sizeof(int)*E); unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); int* H_hx = (int*)malloc(sizeof(int)*N); int* H_cx = (int*)malloc(sizeof(int)*N); int* H_parent = (int*)malloc(sizeof(int)*N); int* H_PQ = (int*)malloc(sizeof(int)*N); int* H_openList = (int*)malloc(sizeof(int)*N); int* H_PQ_size = (int*)malloc(sizeof(int)*K); //for diff int* H_diff_edges = (int*)malloc(sizeof(int)*E); int* H_diff_offset = (int*)malloc(sizeof(int)*N); int* H_diff_weight = (int*)malloc(sizeof(int)*E); memset(H_PQ_size,0,sizeof(int)*K); memset(H_parent,-1,sizeof(int)*N); memset(H_openList,-1,sizeof(int)*N); //init cx for(int i=0;i<N;i++){ H_cx[i]=INT_MAX; } for(int i=0;i<E;i++){ fscanf(fgraph,"%d",&H_edges[i]); } for(int i=0;i<N;i++){ fscanf(fgraph,"%d",&H_offset[i]); } for(int i=0;i<E;i++){ fscanf(fgraph,"%u",&H_weight[i]); } FILE* fhx = fopen("Hx.txt","r"); for(int i=0;i<N;i++){ fscanf(fhx,"%d",&H_hx[i]); } fclose(fgraph); fclose(fhx); printf("completed input\n"); //init Host var int* H_flagEnd = (int*)malloc(sizeof(int)); int* H_a0 = (int*)malloc(sizeof(int)); //required coz if many tries to add same in diff threads high low lower int* H_nVFlag = (int*)malloc(sizeof(int)*N); memset(H_nVFlag,-1,sizeof(int)*N); *H_flagEnd = 0; *H_a0 = 0; 
    //insert startNode in PQ[0]
    H_cx[startNode]=H_hx[startNode];
    H_PQ[0]=startNode;
    H_PQ_size[0]=1;
    H_openList[startNode]=0;

    int* D_offset;
    int* D_edges ;
    unsigned int* D_weight;
    int* D_hx;
    int* D_parent;

    int* D_PQ_size;
    int* D_openList;
    int* D_lock;

    int* D_diff_edges;
    int* D_diff_offset;
    int* D_diff_weight;

    int* D_nVFlag;
    int* D_nV;
    int* D_nV_size;

    int* D_expandNodes;
    int* D_expandNodes_size;

    int* D_flagEnd;

    hipMalloc(&D_offset,sizeof(int)*N);
    hipMalloc(&D_edges,sizeof(int)*E);
    hipMalloc(&D_weight,sizeof(unsigned int)*E);
    hipMalloc(&D_hx,sizeof(int)*N);
    hipMalloc(&D_parent,sizeof(int)*N);

    // hipMalloc(&D_PQ,sizeof(int)*N);
    hipMalloc(&D_PQ_size,sizeof(int)*K);
    hipMalloc(&D_openList,sizeof(int)*N);
    hipMalloc(&D_lock,sizeof(int)*N);

    //diff csr
    hipMalloc(&D_diff_edges,sizeof(int)*E);
    hipMalloc(&D_diff_offset,sizeof(int)*N);
    hipMalloc(&D_diff_weight,sizeof(int)*E);

    //for next set of vertices to add in PQ
    hipMalloc(&D_nV,sizeof(int)*N);
    hipMalloc(&D_nV_size,sizeof(int));
    hipMalloc(&D_nVFlag,sizeof(int)*N);

    hipMalloc(&D_expandNodes,sizeof(int)*N);
    hipMalloc(&D_expandNodes_size,sizeof(int));

    //flag to end search
    hipMalloc(&D_flagEnd,sizeof(int));

    hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice);
    hipMemcpy(D_edges,H_edges,sizeof(int)*E,hipMemcpyHostToDevice);
    hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,hipMemcpyHostToDevice);
    hipMemcpy(D_hx,H_hx,sizeof(int)*N,hipMemcpyHostToDevice);
    hipMemcpy(D_parent,H_parent,sizeof(int)*N,hipMemcpyHostToDevice);
    hipMemcpy(D_openList,H_openList,sizeof(int)*N,hipMemcpyHostToDevice);

    hipMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*E,hipMemcpyHostToDevice);
    hipMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,hipMemcpyHostToDevice);
    hipMemcpy(D_diff_weight,H_diff_weight,sizeof(int)*E,hipMemcpyHostToDevice);

    // hipMemcpy(D_PQ,H_PQ,sizeof(int)*N,hipMemcpyHostToDevice);
    hipMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,hipMemcpyHostToDevice);

    hipMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, hipMemcpyHostToDevice);
    hipMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, hipMemcpyHostToDevice);

    hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice);

    hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice);

    hipMemset(D_lock,0,sizeof(int)*N);

    int flag_PQ_empty = 0;
    for(int i=0;i<K;i++){
        if(H_PQ_size[i]>0)
            flag_PQ_empty=1;
    }

    int numThreads = 512;
    int numBlocks = (K+numThreads-1)/numThreads;
    int N_numBlocks = (N+numThreads-1)/numThreads;

    //DO A* initially on whole graph
    while(*H_flagEnd==0 && flag_PQ_empty==1){

        //extract min
        hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
        hipDeviceSynchronize();

        hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent,
            D_expandNodes,D_expandNodes_size,
            D_lock ,D_flagEnd,D_openList,
            N,E,K,endNode,D_nVFlag,D_PQ_size,
            false,D_diff_offset,D_diff_edges,D_diff_weight );
        hipDeviceSynchronize();

        //gen from flag D_nV
        //for N in parallel
        hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N);
        hipDeviceSynchronize();

        hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
        hipDeviceSynchronize();

        //cpy flagend and flagEmpty
        hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost);
        hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost);

        //reset nVFlag
        hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice);

        //reset next insert array
        hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice);
        hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice);

        flag_PQ_empty = 0;
        for(int i=0;i<K;i++){
            if(H_PQ_size[i]>0)
                flag_PQ_empty=1;
        }
    }

    hipLaunchKernelGGL(( printCX), dim3(1),dim3(1), 0, 0, endNode);

    hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost);

    if(*H_flagEnd==1){
        int p = endNode;
        while(H_parent[p]!=-1){
            printf("%d ",p);
            p = H_parent[p];
        }
        printf("%d\n",startNode);
    }
    else{
        printf("not found\n");
    }

}
06fb5ca9d89d91bdcefd5f652efa39ea8a4b3825.cu
/* only works on DIRECTED GRAPH */ #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #define MAX_NODE 1000000 #define DEBUG 1 __device__ volatile int Cx[MAX_NODE]; __device__ volatile int PQ[MAX_NODE]; //K in parallel __global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<K && PQ_size[id]>0){ //extract min from PQ int front = id* ( (N+K-1)/K ); int node = PQ[front]; // printf("extract min %d %d\n",id,node); // restructure the heap PQ[front]=PQ[front+PQ_size[id]-1]; PQ_size[id]-=1; int pqIndex = 0; while(2*pqIndex+1 < PQ_size[id]){ if(2*pqIndex+2 >= PQ_size[id]){ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else break; } else{ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ int swap = PQ[front + 2*pqIndex+2]; PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+2; } else{ break; } } } //removed from openList openList[node] = -1; //added to expand next int len = atomicAdd(expandNodes_size,1); expandNodes[len]=node; } } //for K in parallel __global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent, int* expandNodes,int* expandNodes_size, int* lock ,int* flagEnd,int* openList, int N,int E, int K,int dest,int* nVFlag,int* PQ_size, int flagDiff,int* diff_off,int* diff_edge,int* diff_weight ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id< *expandNodes_size ){ int node = expandNodes[id]; //printf("%d %d\n",id,node); //reach dest if(node == dest){ *flagEnd = 1; //printf("found %d\n",id); return; } // expand int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start<end){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } //printf("%d$ before while\n",id); //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock while(atomicCAS(&lock[child],0,1)!=0){ } //printf("%d$%d: %d ,%d\n",node,child,Cx[child],lock[child]); //update cost value if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(DEBUG) printf("exp: %d %d\n",node,child); if(openList[child]>=0){ //update operating on one thread if(DEBUG) printf("upd: %d %d\n",node,child); int Kq = openList[child]; int front = Kq*( (N+K-1)/K ); int index = -1; for(int i=front;i<front+PQ_size[Kq];i++){ if(PQ[i]==child){ index = i; } } if(index > 0){ int i = index; while(i > front){ if( Cx[PQ[(i-1)/2]] > Cx[PQ[i]] ){ int swap = PQ[i]; PQ[i] = PQ[(i-1)/2]; PQ[(i-1)/2] = swap; i = (i-1)/2; } else break; } } __threadfence(); }else{ nVFlag[child]=1; //add only once } } //unlock atomicCAS(&lock[child],1,0); start++; } if(DEBUG) printf("%d outside while\n",id); if(flagDiff){ // printf("something\n"); start = diff_off[node]; end = E; if(node!=N-1) end = diff_off[node+1]; while(start<end){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to 
update C(x) //loop till acquire the lock while(atomicCAS(&lock[child],0,1)!=0){ } printf("%d$%d: %d ,%d\n",node,child,Cx[child],lock[child]); //update cost value if( Cx[child] > (Cx[node] - Hx[node])+ diff_weight[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ diff_weight[start] + Hx[child]; __threadfence(); parent[child] = node; // printf("%d-%d: %d ,%d\n",node,child,Cx[child],lock[child]); if(openList[child]>=0){ //update operating on one thread int Kq = openList[child]; int front = Kq*( (N+K-1)/K ); int index = -1; for(int i=front;i<front+PQ_size[Kq];i++){ if(PQ[i]==child){ index = i; } } if(index > 0){ int i = index; while(i > front){ if( Cx[PQ[(i-1)/2]] > Cx[PQ[i]] ){ int swap = PQ[i]; PQ[i] = PQ[(i-1)/2]; PQ[(i-1)/2] = swap; i = (i-1)/2; } else break; } } __threadfence(); }else{ nVFlag[child]=1; } } //unlock atomicCAS(&lock[child],1,0); start++; } } } } //N threads __global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ //printf("2: %d %d\n",id,nextFlag[id]); if(nextFlag[id]==1){ int index = atomicAdd(nvSize,1); nextV[index]=id; // printf("2: %d\n",id); } } } //for K in parallel __global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K){ // printf("id: %d\n",id); int front = id*( (N+K-1)/K ); int i = id; // printf("s: %d %d\n",*nVsize,PQS[id]); while(i<*nVsize){ PQ[front+PQS[id]]= nextV[i]; PQS[id]+=1; //add in openList openList[nextV[i]] = id; //printf("insert: %d, %d\n",nextV[i],PQS[id]); if(PQS[id]>1){ int index = PQS[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } i += K; } } } __global__ void printCX(int dest){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id==0){ printf("cost: %d\n",Cx[dest]); } } int main(){ //the K PQ int K ; scanf("%d\n",&K); int startNode,endNode; scanf("%d %d",&startNode,&endNode); FILE* fgraph = fopen("graph.txt","r"); int N,E; fscanf(fgraph,"%d %d\n",&N,&E); int* H_offset = (int*)malloc(sizeof(int)*N); int* H_edges = (int*)malloc(sizeof(int)*E); unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); int* H_hx = (int*)malloc(sizeof(int)*N); int* H_cx = (int*)malloc(sizeof(int)*N); int* H_parent = (int*)malloc(sizeof(int)*N); int* H_PQ = (int*)malloc(sizeof(int)*N); int* H_openList = (int*)malloc(sizeof(int)*N); int* H_PQ_size = (int*)malloc(sizeof(int)*K); //for diff int* H_diff_edges = (int*)malloc(sizeof(int)*E); int* H_diff_offset = (int*)malloc(sizeof(int)*N); int* H_diff_weight = (int*)malloc(sizeof(int)*E); memset(H_PQ_size,0,sizeof(int)*K); memset(H_parent,-1,sizeof(int)*N); memset(H_openList,-1,sizeof(int)*N); //init cx for(int i=0;i<N;i++){ H_cx[i]=INT_MAX; } for(int i=0;i<E;i++){ fscanf(fgraph,"%d",&H_edges[i]); } for(int i=0;i<N;i++){ fscanf(fgraph,"%d",&H_offset[i]); } for(int i=0;i<E;i++){ fscanf(fgraph,"%u",&H_weight[i]); } FILE* fhx = fopen("Hx.txt","r"); for(int i=0;i<N;i++){ fscanf(fhx,"%d",&H_hx[i]); } fclose(fgraph); fclose(fhx); printf("completed input\n"); //init Host var int* H_flagEnd = (int*)malloc(sizeof(int)); int* H_a0 = (int*)malloc(sizeof(int)); //required coz if many tries to add same in diff threads high low lower int* H_nVFlag = (int*)malloc(sizeof(int)*N); memset(H_nVFlag,-1,sizeof(int)*N); *H_flagEnd = 0; *H_a0 = 0; //insert startNode in PQ[0] H_cx[startNode]=H_hx[startNode]; 
    H_PQ[0]=startNode;
    H_PQ_size[0]=1;
    H_openList[startNode]=0;

    int* D_offset;
    int* D_edges ;
    unsigned int* D_weight;
    int* D_hx;
    int* D_parent;

    int* D_PQ_size;
    int* D_openList;
    int* D_lock;

    int* D_diff_edges;
    int* D_diff_offset;
    int* D_diff_weight;

    int* D_nVFlag;
    int* D_nV;
    int* D_nV_size;

    int* D_expandNodes;
    int* D_expandNodes_size;

    int* D_flagEnd;

    cudaMalloc(&D_offset,sizeof(int)*N);
    cudaMalloc(&D_edges,sizeof(int)*E);
    cudaMalloc(&D_weight,sizeof(unsigned int)*E);
    cudaMalloc(&D_hx,sizeof(int)*N);
    cudaMalloc(&D_parent,sizeof(int)*N);

    // cudaMalloc(&D_PQ,sizeof(int)*N);
    cudaMalloc(&D_PQ_size,sizeof(int)*K);
    cudaMalloc(&D_openList,sizeof(int)*N);
    cudaMalloc(&D_lock,sizeof(int)*N);

    //diff csr
    cudaMalloc(&D_diff_edges,sizeof(int)*E);
    cudaMalloc(&D_diff_offset,sizeof(int)*N);
    cudaMalloc(&D_diff_weight,sizeof(int)*E);

    //for next set of vertices to add in PQ
    cudaMalloc(&D_nV,sizeof(int)*N);
    cudaMalloc(&D_nV_size,sizeof(int));
    cudaMalloc(&D_nVFlag,sizeof(int)*N);

    cudaMalloc(&D_expandNodes,sizeof(int)*N);
    cudaMalloc(&D_expandNodes_size,sizeof(int));

    //flag to end search
    cudaMalloc(&D_flagEnd,sizeof(int));

    cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice);
    cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice);
    cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice);

    cudaMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*E,cudaMemcpyHostToDevice);
    cudaMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_diff_weight,H_diff_weight,sizeof(int)*E,cudaMemcpyHostToDevice);

    // cudaMemcpy(D_PQ,H_PQ,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice);

    cudaMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice);

    cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice);

    cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice);

    cudaMemset(D_lock,0,sizeof(int)*N);

    int flag_PQ_empty = 0;
    for(int i=0;i<K;i++){
        if(H_PQ_size[i]>0)
            flag_PQ_empty=1;
    }

    int numThreads = 512;
    int numBlocks = (K+numThreads-1)/numThreads;
    int N_numBlocks = (N+numThreads-1)/numThreads;

    //DO A* initially on whole graph
    while(*H_flagEnd==0 && flag_PQ_empty==1){

        //extract min
        extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
        cudaDeviceSynchronize();

        A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,
            D_expandNodes,D_expandNodes_size,
            D_lock ,D_flagEnd,D_openList,
            N,E,K,endNode,D_nVFlag,D_PQ_size,
            false,D_diff_offset,D_diff_edges,D_diff_weight );
        cudaDeviceSynchronize();

        //gen from flag D_nV
        //for N in parallel
        setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
        cudaDeviceSynchronize();

        insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
        cudaDeviceSynchronize();

        //cpy flagend and flagEmpty
        cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost);
        cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost);

        //reset nVFlag
        cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice);

        //reset next insert array
        cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice);
        cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice);

        flag_PQ_empty = 0;
        for(int i=0;i<K;i++){
            if(H_PQ_size[i]>0)
                flag_PQ_empty=1;
        }
    }

    printCX<<<1,1>>>(endNode);

    cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost);

    if(*H_flagEnd==1){
        int p = endNode;
        while(H_parent[p]!=-1){
            printf("%d ",p);
            p = H_parent[p];
        }
        printf("%d\n",startNode);
    }
    else{
        printf("not found\n");
    }

}
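Both versions of this file partition one flat PQ array into K independent binary min-heaps keyed on Cx: queue k occupies PQ[k*cap .. k*cap+PQ_size[k]) with cap = (N+K-1)/K. A minimal host-side sketch of that push/sift-up layout (illustrative only; the function and parameter names are not from the original files):

void push(int *PQ, int *size, const int *Cx, int N, int K, int k, int node) {
    int cap = (N + K - 1) / K;    // capacity of each of the K partitions
    int front = k * cap;          // start of partition k
    int i = size[k]++;            // append at the end of partition k
    PQ[front + i] = node;
    while (i > 0) {               // sift up while the parent has a larger cost
        int parent = (i - 1) / 2;
        if (Cx[PQ[front + parent]] <= Cx[PQ[front + i]]) break;
        int tmp = PQ[front + parent];
        PQ[front + parent] = PQ[front + i];
        PQ[front + i] = tmp;
        i = parent;
    }
}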
358cc422507c272cbf0cadb1c8690d053c7ea861.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ static void out(struct axon *neuro, unsigned char *spike,int *output_image, struct OutPutLayer *outputlayer, int *output_timeN) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=bid *320 + tid; int timeN=output_timeN[number]; int layernum; int neuron_num; int spike_strem; if(number<outputlayer[0].all_length) { for(int i=0;i<outputlayer[0].all_layer_num;i++) { if(number<=outputlayer[i].last_num) { layernum=i; if(i>0) { neuron_num=number-(outputlayer[i-1].last_num+1); } else { neuron_num=number; } break; } } spike_strem=output_image[number]; if(timeN==0) {spike_strem=(spike_strem<<1)&0x03ff;} spike_strem=spike_strem|spike[neuron_num+outputlayer[layernum].addr]; //step=200us ,1ms store 1 spike output_image[number]=spike_strem; /* if(spike[neuron_num+outputlayer[layernum].addr]==1) { printf("number=%d\n",number); }*/ } timeN=(timeN+1)%5; output_timeN[number]=timeN; } __global__ static void fmri(struct axon *neuro, unsigned char *spike,int *output_image, struct OutPutLayer *outputlayer) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=bid *320 + tid; float x; float t=100; int layernum; int neuron_num; int spike_strem; if(number<outputlayer[0].all_length) { for(int i=0;i<outputlayer[0].all_layer_num;i++) { if(number<=outputlayer[i].last_num) { layernum=i; if(i>0) { neuron_num=number-(outputlayer[i-1].last_num+1); } else { neuron_num=number; } break; } } x=output_image[number]/1000000.0; if(spike[neuron_num+outputlayer[layernum].addr]==1) { x=x+0.01; } x=x+tau*(-x/t); output_image[number]=(int)(x*1000000); } }
358cc422507c272cbf0cadb1c8690d053c7ea861.cu
#include "cuda_runtime.h" #include <stdio.h> __global__ static void out(struct axon *neuro, unsigned char *spike,int *output_image, struct OutPutLayer *outputlayer, int *output_timeN) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=bid *320 + tid; int timeN=output_timeN[number]; int layernum; int neuron_num; int spike_strem; if(number<outputlayer[0].all_length) { for(int i=0;i<outputlayer[0].all_layer_num;i++) { if(number<=outputlayer[i].last_num) { layernum=i; if(i>0) { neuron_num=number-(outputlayer[i-1].last_num+1); } else { neuron_num=number; } break; } } spike_strem=output_image[number]; if(timeN==0) {spike_strem=(spike_strem<<1)&0x03ff;} spike_strem=spike_strem|spike[neuron_num+outputlayer[layernum].addr]; //step=200us ,1ms store 1 spike output_image[number]=spike_strem; /* if(spike[neuron_num+outputlayer[layernum].addr]==1) { printf("number=%d\n",number); }*/ } timeN=(timeN+1)%5; output_timeN[number]=timeN; } __global__ static void fmri(struct axon *neuro, unsigned char *spike,int *output_image, struct OutPutLayer *outputlayer) { const int tid = threadIdx.x; const int bid = blockIdx.x; int number=bid *320 + tid; float x; float t=100; int layernum; int neuron_num; int spike_strem; if(number<outputlayer[0].all_length) { for(int i=0;i<outputlayer[0].all_layer_num;i++) { if(number<=outputlayer[i].last_num) { layernum=i; if(i>0) { neuron_num=number-(outputlayer[i-1].last_num+1); } else { neuron_num=number; } break; } } x=output_image[number]/1000000.0; if(spike[neuron_num+outputlayer[layernum].addr]==1) { x=x+0.01; } x=x+tau*(-x/t); output_image[number]=(int)(x*1000000); } }
ec9b6b70f73fc1732d52df6626cd8c9aca77acb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "network.h" #include "detection_layer.h" #include "cost_layer.h" #include "utils.h" #include "parser.h" #include "box.h" #include "image.h" #include <sys/time.h> } #ifdef OPENCV #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" extern "C" image ipl_to_image(IplImage* src); extern "C" void convert_yolo_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness); extern "C" void draw_yolo(image im, int num, float thresh, box *boxes, float **probs); extern "C" char *voc_names[]; extern "C" image voc_labels[]; static float **probs; static box *boxes; static network net; static image in ; static image in_s ; static image det ; static image det_s; static image disp ; static cv::VideoCapture cap; static float fps = 0; static float demo_thresh = 0; // new stuff #include "feature_matcher.h" #include <vector> #include <string> #include <deque> std::vector<std::deque<cv::Mat> > person_db; int pool_size = 2; int wait_period = 1; int since_last = 0; std::vector<int> active; cv::Mat current_img; std::vector<cv::Mat> image_matches; std::vector<cv::Mat> bad_matches; std::vector<int> indices_matches; bool no_match; IplImage* im_ptr = NULL; int frame_num = 0; void *fetch_in_thread(void *ptr) { cv::Mat frame_m; cap >> frame_m; IplImage frame = frame_m; in = ipl_to_image(&frame); rgbgr_image(in); in_s = resize_image(in, net.w, net.h); ++frame_num; return 0; } void image_to_mat(image p, cv::Mat& m) { int x,y,k; image copy = copy_image(p); constrain_image(copy); if(p.c == 3) rgbgr_image(copy); //normalize_image(copy); // char buff[256]; // //sprintf(buff, "%s (%d)", name, windows); // sprintf(buff, "%s", name); m.create(p.h, p.w, CV_8UC3); // IplImage *disp = cvCreateImage(cvSize(p.w,p.h), IPL_DEPTH_8U, p.c); // int step = disp->widthStep; // cvNamedWindow(buff, CV_WINDOW_NORMAL); //cvMoveWindow(buff, 100*(windows%10) + 200*(windows/10), 100*(windows%10)); // ++windows; for(y = 0; y < p.h; ++y){ for(x = 0; x < p.w; ++x){ for(k= 0; k < p.c; ++k){ m.at<cv::Vec3b>(y,x)[k] = (unsigned char)(get_pixel(copy,x,y,k)*255); // m.at<uchar>(y, x, 0) = 255; //(unsigned char)(get_pixel(copy,x,y,k)*255); // m.at<uchar>(y, x, 1) = 0; //(unsigned char)(get_pixel(copy,x,y,k)*255); // m.at<uchar>(y, x, 2) = 0; //(unsigned char)(get_pixel(copy,x,y,k)*255); // disp->imageData[y*step + x*p.c + k] = (unsigned char)(get_pixel(copy,x,y,k)*255); } } } free_image(copy); // m = cv::Mat(disp); // return disp; } void track_person(image image_im, int num, float thresh, box *boxes, float **probs, char **names, image *labels, int classes) { int cls_person = 14; active = std::vector<int>(person_db.size()); std::vector<cv::Rect> rects; std::vector<int> person_ids; image_matches.clear(); indices_matches.clear(); bad_matches.clear(); image_to_mat(image_im, current_img); for(int i = 0; i < num; ++i){ int cls = max_index(probs[i], classes); float prob = probs[i][cls]; if(cls == cls_person && prob > thresh){ box& b = boxes[i]; int left = (b.x-b.w/2.)*image_im.w; int right = (b.x+b.w/2.)*image_im.w; int top = (b.y-b.h/2.)*image_im.h; int bot = (b.y+b.h/2.)*image_im.h; if(left < 0) left = 0; if(right > image_im.w-1) right = image_im.w-1; if(top < 0) top = 0; if(bot > image_im.h-1) bot = image_im.h-1; cv::Rect rect(left, top, right-left, bot-top); cv::Mat new_box = 
current_img(rect); int found = -1; int max_person = -1; int max_matches = 0; cv::Mat max_image_match; // search match between current person with person database for (int j = 0, len = person_db.size(); j < len; ++j) { if (active[j] == 0) { int vote = 0; cv::Mat image_match; for (int k = 0, len = person_db[j].size(); k < len; ++k) { int match_result = matchFeatures(person_db[j][k], new_box, image_match); if (match_result > 0) { vote++; } else { bad_matches.push_back(image_match); } } if (vote >= person_db[j].size()/2) { max_person = j; max_matches = vote; max_image_match = image_match; break; } } } // found person, update old person portfolio if (max_person != -1) { found = max_person; if (since_last < wait_period) { ++since_last; } else { person_db[max_person].push_back(new_box.clone()); since_last = 0; } active[max_person] = max_matches; image_matches.push_back(max_image_match); indices_matches.push_back(max_person+1); } // did not find any person, creating a new profile in person database if (found == -1) { if (person_db.size() == 0) { found = person_db.size(); person_db.push_back(std::deque<cv::Mat>()); person_db.back().push_back(new_box.clone()); active.push_back(1); } } if (found != -1) { rects.push_back(rect); person_ids.push_back(found+1); if (person_db[found].size() > pool_size) { person_db[found].pop_front(); } } else { rects.push_back(rect); person_ids.push_back(0); } } } for (int i = 0, len = person_ids.size(); i < len; ++i) { // label person char person_callname[50]; sprintf(person_callname, "Person %d", person_ids[i]); if (person_ids[i]) printf("Person %d, matches %d\n", person_ids[i], active[person_ids[i]-1]); cv::putText(current_img, person_callname, cv::Point(rects[i].x+10, rects[i].y+30), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar( 255,0,0 ), 2); cv::rectangle(current_img, rects[i], cv::Scalar(240,128,128), 3); } if (image_matches.size()) { printf("Match accepted!\n"); no_match = false; } else { printf("Match rejected or no match!\n"); no_match = true; } } void *detect_in_thread(void *ptr) { float nms = .4; detection_layer l = net.layers[net.n-1]; float *X = det_s.data; float *predictions = network_predict(net, X); free_image(det_s); convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0); if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms); // printf("\033[2J"); // printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); printf("Objects:\n\n"); // new stuff track_person(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, 20); // draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, 20); return 0; } extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index) { demo_thresh = thresh; printf("YOLO demo\n"); net = parse_network_cfg(cfgfile); if(weightfile){ load_weights(&net, weightfile); } set_batch_network(&net, 1); srand(2222222); bool use_video = false; if (use_video) { // Open video file std::string video_path = "drone.mp4"; cv::VideoCapture vid(video_path); cap = vid; if(!cap.isOpened()) error(("Couldn't open video: " + video_path + "\n").c_str()); } else { // Open camera cv::VideoCapture cam(cam_index); cap = cam; if(!cap.isOpened()) error("Couldn't connect to webcam.\n"); } detection_layer l = net.layers[net.n-1]; int j; boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box)); probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *)); for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *)); 
pthread_t fetch_thread; pthread_t detect_thread; fetch_in_thread(0); det = in; det_s = in_s; fetch_in_thread(0); detect_in_thread(0); disp = det; det = in; det_s = in_s; int fast_forward = 1; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed"); if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed"); pthread_join(fetch_thread, 0); pthread_join(detect_thread, 0); // if (person_db.size()) { // for (int i = 0, len = person_db[0].size(); i < len; ++i) { // if (person_db[0][i].rows) { // char match_name[50]; // sprintf(match_name, "Person 1 Sample %d", i); // cv::imshow(match_name, person_db[0][i]); // char key = cv::waitKey(1); // if (key == 's') { // fast_forward = 0; // } // } // } // } for (int i = 0, len = image_matches.size(); i < len; ++i) { if (image_matches[i].rows) { char match_name[50]; sprintf(match_name, "Match %d", indices_matches[i]); cv::imshow(match_name, image_matches[i]); char key = cv::waitKey(1); if (key == 's') { fast_forward = 0; } } } // for (int i = 0, len = bad_matches.size(); i < len; ++i) { // if (bad_matches[i].rows) { // char match_name[50]; // sprintf(match_name, "Bad Match %d", i); // cv::imshow(match_name, bad_matches[i]); // char key = cv::waitKey(1); // if (key == 's') { // fast_forward = 0; // } // } // } if (current_img.rows) { cv::imshow("YOLO", current_img); char key = cv::waitKey(1); if (key == 's') { fast_forward = 0; } } printf("Frame: %d\n", frame_num); if (fast_forward == 0) { char key = cv::waitKey(0); if (key == 'f') { fast_forward = 1; } else if (key == 's') { fast_forward = 0; } } // show_image(disp, "YOLO"); free_image(disp); cvWaitKey(1); disp = det; det = in; det_s = in_s; gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } } #else extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index){ fprintf(stderr, "YOLO demo needs OpenCV for webcam images.\n"); } #endif
ec9b6b70f73fc1732d52df6626cd8c9aca77acb5.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "network.h" #include "detection_layer.h" #include "cost_layer.h" #include "utils.h" #include "parser.h" #include "box.h" #include "image.h" #include <sys/time.h> } #ifdef OPENCV #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" extern "C" image ipl_to_image(IplImage* src); extern "C" void convert_yolo_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness); extern "C" void draw_yolo(image im, int num, float thresh, box *boxes, float **probs); extern "C" char *voc_names[]; extern "C" image voc_labels[]; static float **probs; static box *boxes; static network net; static image in ; static image in_s ; static image det ; static image det_s; static image disp ; static cv::VideoCapture cap; static float fps = 0; static float demo_thresh = 0; // new stuff #include "feature_matcher.h" #include <vector> #include <string> #include <deque> std::vector<std::deque<cv::Mat> > person_db; int pool_size = 2; int wait_period = 1; int since_last = 0; std::vector<int> active; cv::Mat current_img; std::vector<cv::Mat> image_matches; std::vector<cv::Mat> bad_matches; std::vector<int> indices_matches; bool no_match; IplImage* im_ptr = NULL; int frame_num = 0; void *fetch_in_thread(void *ptr) { cv::Mat frame_m; cap >> frame_m; IplImage frame = frame_m; in = ipl_to_image(&frame); rgbgr_image(in); in_s = resize_image(in, net.w, net.h); ++frame_num; return 0; } void image_to_mat(image p, cv::Mat& m) { int x,y,k; image copy = copy_image(p); constrain_image(copy); if(p.c == 3) rgbgr_image(copy); //normalize_image(copy); // char buff[256]; // //sprintf(buff, "%s (%d)", name, windows); // sprintf(buff, "%s", name); m.create(p.h, p.w, CV_8UC3); // IplImage *disp = cvCreateImage(cvSize(p.w,p.h), IPL_DEPTH_8U, p.c); // int step = disp->widthStep; // cvNamedWindow(buff, CV_WINDOW_NORMAL); //cvMoveWindow(buff, 100*(windows%10) + 200*(windows/10), 100*(windows%10)); // ++windows; for(y = 0; y < p.h; ++y){ for(x = 0; x < p.w; ++x){ for(k= 0; k < p.c; ++k){ m.at<cv::Vec3b>(y,x)[k] = (unsigned char)(get_pixel(copy,x,y,k)*255); // m.at<uchar>(y, x, 0) = 255; //(unsigned char)(get_pixel(copy,x,y,k)*255); // m.at<uchar>(y, x, 1) = 0; //(unsigned char)(get_pixel(copy,x,y,k)*255); // m.at<uchar>(y, x, 2) = 0; //(unsigned char)(get_pixel(copy,x,y,k)*255); // disp->imageData[y*step + x*p.c + k] = (unsigned char)(get_pixel(copy,x,y,k)*255); } } } free_image(copy); // m = cv::Mat(disp); // return disp; } void track_person(image image_im, int num, float thresh, box *boxes, float **probs, char **names, image *labels, int classes) { int cls_person = 14; active = std::vector<int>(person_db.size()); std::vector<cv::Rect> rects; std::vector<int> person_ids; image_matches.clear(); indices_matches.clear(); bad_matches.clear(); image_to_mat(image_im, current_img); for(int i = 0; i < num; ++i){ int cls = max_index(probs[i], classes); float prob = probs[i][cls]; if(cls == cls_person && prob > thresh){ box& b = boxes[i]; int left = (b.x-b.w/2.)*image_im.w; int right = (b.x+b.w/2.)*image_im.w; int top = (b.y-b.h/2.)*image_im.h; int bot = (b.y+b.h/2.)*image_im.h; if(left < 0) left = 0; if(right > image_im.w-1) right = image_im.w-1; if(top < 0) top = 0; if(bot > image_im.h-1) bot = image_im.h-1; cv::Rect rect(left, top, right-left, bot-top); cv::Mat new_box = current_img(rect); int found = -1; int max_person = -1; int max_matches = 0; 
cv::Mat max_image_match; // search match between current person with person database for (int j = 0, len = person_db.size(); j < len; ++j) { if (active[j] == 0) { int vote = 0; cv::Mat image_match; for (int k = 0, len = person_db[j].size(); k < len; ++k) { int match_result = matchFeatures(person_db[j][k], new_box, image_match); if (match_result > 0) { vote++; } else { bad_matches.push_back(image_match); } } if (vote >= person_db[j].size()/2) { max_person = j; max_matches = vote; max_image_match = image_match; break; } } } // found person, update old person portfolio if (max_person != -1) { found = max_person; if (since_last < wait_period) { ++since_last; } else { person_db[max_person].push_back(new_box.clone()); since_last = 0; } active[max_person] = max_matches; image_matches.push_back(max_image_match); indices_matches.push_back(max_person+1); } // did not find any person, creating a new profile in person database if (found == -1) { if (person_db.size() == 0) { found = person_db.size(); person_db.push_back(std::deque<cv::Mat>()); person_db.back().push_back(new_box.clone()); active.push_back(1); } } if (found != -1) { rects.push_back(rect); person_ids.push_back(found+1); if (person_db[found].size() > pool_size) { person_db[found].pop_front(); } } else { rects.push_back(rect); person_ids.push_back(0); } } } for (int i = 0, len = person_ids.size(); i < len; ++i) { // label person char person_callname[50]; sprintf(person_callname, "Person %d", person_ids[i]); if (person_ids[i]) printf("Person %d, matches %d\n", person_ids[i], active[person_ids[i]-1]); cv::putText(current_img, person_callname, cv::Point(rects[i].x+10, rects[i].y+30), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar( 255,0,0 ), 2); cv::rectangle(current_img, rects[i], cv::Scalar(240,128,128), 3); } if (image_matches.size()) { printf("Match accepted!\n"); no_match = false; } else { printf("Match rejected or no match!\n"); no_match = true; } } void *detect_in_thread(void *ptr) { float nms = .4; detection_layer l = net.layers[net.n-1]; float *X = det_s.data; float *predictions = network_predict(net, X); free_image(det_s); convert_yolo_detections(predictions, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0); if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms); // printf("\033[2J"); // printf("\033[1;1H"); printf("\nFPS:%.0f\n",fps); printf("Objects:\n\n"); // new stuff track_person(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, 20); // draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, voc_names, voc_labels, 20); return 0; } extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index) { demo_thresh = thresh; printf("YOLO demo\n"); net = parse_network_cfg(cfgfile); if(weightfile){ load_weights(&net, weightfile); } set_batch_network(&net, 1); srand(2222222); bool use_video = false; if (use_video) { // Open video file std::string video_path = "drone.mp4"; cv::VideoCapture vid(video_path); cap = vid; if(!cap.isOpened()) error(("Couldn't open video: " + video_path + "\n").c_str()); } else { // Open camera cv::VideoCapture cam(cam_index); cap = cam; if(!cap.isOpened()) error("Couldn't connect to webcam.\n"); } detection_layer l = net.layers[net.n-1]; int j; boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box)); probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *)); for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *)); pthread_t fetch_thread; pthread_t detect_thread; fetch_in_thread(0); det = in; 
det_s = in_s; fetch_in_thread(0); detect_in_thread(0); disp = det; det = in; det_s = in_s; int fast_forward = 1; while(1){ struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed"); if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed"); pthread_join(fetch_thread, 0); pthread_join(detect_thread, 0); // if (person_db.size()) { // for (int i = 0, len = person_db[0].size(); i < len; ++i) { // if (person_db[0][i].rows) { // char match_name[50]; // sprintf(match_name, "Person 1 Sample %d", i); // cv::imshow(match_name, person_db[0][i]); // char key = cv::waitKey(1); // if (key == 's') { // fast_forward = 0; // } // } // } // } for (int i = 0, len = image_matches.size(); i < len; ++i) { if (image_matches[i].rows) { char match_name[50]; sprintf(match_name, "Match %d", indices_matches[i]); cv::imshow(match_name, image_matches[i]); char key = cv::waitKey(1); if (key == 's') { fast_forward = 0; } } } // for (int i = 0, len = bad_matches.size(); i < len; ++i) { // if (bad_matches[i].rows) { // char match_name[50]; // sprintf(match_name, "Bad Match %d", i); // cv::imshow(match_name, bad_matches[i]); // char key = cv::waitKey(1); // if (key == 's') { // fast_forward = 0; // } // } // } if (current_img.rows) { cv::imshow("YOLO", current_img); char key = cv::waitKey(1); if (key == 's') { fast_forward = 0; } } printf("Frame: %d\n", frame_num); if (fast_forward == 0) { char key = cv::waitKey(0); if (key == 'f') { fast_forward = 1; } else if (key == 's') { fast_forward = 0; } } // show_image(disp, "YOLO"); free_image(disp); cvWaitKey(1); disp = det; det = in; det_s = in_s; gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); float curr = 1000000.f/((long int)tval_result.tv_usec); fps = .9*fps + .1*curr; } } #else extern "C" void demo_yolo(char *cfgfile, char *weightfile, float thresh, int cam_index){ fprintf(stderr, "YOLO demo needs OpenCV for webcam images.\n"); } #endif
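track_person() converts darknet's normalized center-format boxes to clamped pixel rectangles before matching against the person database. A minimal sketch of that conversion (the Box struct here is only a stand-in for darknet's box, declared for the example):

struct Box { float x, y, w, h; };   // center, width, height, all normalized to [0,1]
void to_pixel_rect(Box b, int im_w, int im_h, int *left, int *top, int *right, int *bot) {
    *left  = (int)((b.x - b.w / 2.f) * im_w);
    *right = (int)((b.x + b.w / 2.f) * im_w);
    *top   = (int)((b.y - b.h / 2.f) * im_h);
    *bot   = (int)((b.y + b.h / 2.f) * im_h);
    if (*left < 0) *left = 0;                  // clamp to the image borders
    if (*right > im_w - 1) *right = im_w - 1;
    if (*top < 0) *top = 0;
    if (*bot > im_h - 1) *bot = im_h - 1;
}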
1ee6154059ac556c73b9e6371680a077cf32198d.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> struct abs_functor { __host__ __device__ float operator()(const float& x, const float& y) const { float z = x-y; return z >= 0 ? z : -z; } }; void THNN_CudaAbsCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage) { THAssert(THCudaTensor_checkGPU(state, 2, input, target)); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), abs_functor()); if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct abs_updateGradInput_functor { const float norm; abs_updateGradInput_functor(float norm_) : norm(norm_) {} __host__ __device__ float operator()(const float& x, const float& y) const { return (x - y) >= 0 ? norm : -norm; } }; void THNN_CudaAbsCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage) { THAssert(THCudaTensor_checkGPU(state, 3, input, target, gradInput)); long size = THCudaTensor_nElement(state, input); float norm = (sizeAverage ? 1./size : 1.); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, abs_updateGradInput_functor(norm)); THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
1ee6154059ac556c73b9e6371680a077cf32198d.cu
#include "THCUNN.h" #include <thrust/fill.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/inner_product.h> struct abs_functor { __host__ __device__ float operator()(const float& x, const float& y) const { float z = x-y; return z >= 0 ? z : -z; } }; void THNN_CudaAbsCriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage) { THAssert(THCudaTensor_checkGPU(state, 2, input, target)); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus<float>(), abs_functor()); if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct abs_updateGradInput_functor { const float norm; abs_updateGradInput_functor(float norm_) : norm(norm_) {} __host__ __device__ float operator()(const float& x, const float& y) const { return (x - y) >= 0 ? norm : -norm; } }; void THNN_CudaAbsCriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage) { THAssert(THCudaTensor_checkGPU(state, 3, input, target, gradInput)); long size = THCudaTensor_nElement(state, input); float norm = (sizeAverage ? 1./size : 1.); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); thrust::transform(input_data, input_data+size, target_data, gradInput_data, abs_updateGradInput_functor(norm)); THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
e3f512827c1ad07a3bf4f2861a3dd4925955c5c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % by: Alireza Ahmadi % % University of Bonn- MSc Robotics & Geodetic Engineering% % [email protected] % % AlirezaAhmadi.xyz % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ #include "defGraph.h" namespace DynaMap{ namespace geometry{ defGraph::defGraph(void){} defGraph::~defGraph(void){ Free(); } void defGraph::init(MeshSTD &srcMesh, int ObservationNum, int mode){ nodeNum = NODE_NUM; activeNodesNum = (nodeNum > ObservationNum) ? ObservationNum : nodeNum; defGraphMesh = srcMesh; nNum = KNN; visibleNodesNum = activeNodesNum; initGraphNodes(srcMesh, mode); if(!KDTREE){ hipMallocManaged(&visibleNodeIds, sizeof(int) * ObservationNum * nodeNum); hipMallocManaged(&visibleNWeights, sizeof(float) * ObservationNum * nodeNum); hipMallocManaged(&visibleNDistances, sizeof(float) * ObservationNum * nodeNum); hipDeviceSynchronize(); }else{ hipMallocManaged(&graphKDTree, sizeof(kdTree)); hipDeviceSynchronize(); graphKDTree->init(nodeNum); // allocating Query nodes on Host memory struct kdNode *kdQuery_h; kdQuery_h =(struct kdNode*) calloc(nodeNum, sizeof(struct kdNode)); // loading KDtree Query nodes from Graph Nodes for(int n = 0; n < nodeNum; n++){ kdQuery_h[n].id = n; kdQuery_h[n].x[0] = nodes[n].vertex.position.x; kdQuery_h[n].x[1] = nodes[n].vertex.position.y; kdQuery_h[n].x[2] = nodes[n].vertex.position.z; // std::cout << kdQuery[n].id << ", "<<kdQuery[n].x[0] << ", " << kdQuery[n].x[1] << ", " <<kdQuery[n].x[2] << std::endl; } // copy Query KDtree to Device memory hipMemcpy(graphKDTree->kdQuery, kdQuery_h, sizeof(struct kdNode) * nodeNum, hipMemcpyHostToDevice); hipDeviceSynchronize(); // allocating Root of KDTree on Host memory struct kdNode *kdRoot_h; kdRoot_h = (struct kdNode*) calloc(nodeNum, sizeof(struct kdNode)); // build DKTree on Host memory kdRoot_h = graphKDTree->buildTree(kdQuery_h, nodeNum, 0, 3); // copy KDtree to Device memory hipMemcpy(graphKDTree->kdRoot, kdRoot_h, sizeof(struct kdNode) * nodeNum, hipMemcpyHostToDevice); hipDeviceSynchronize(); hipMallocManaged(&visibleNodeIds, sizeof(int) * ObservationNum * nNum); hipMallocManaged(&visibleNWeights, sizeof(float) * ObservationNum * nNum); hipMallocManaged(&visibleNDistances, sizeof(float) * ObservationNum * nNum); hipDeviceSynchronize(); } std::cout << "Graph nodeNum: " << nodeNum << ", KDtree: " << KDTREE << std::endl; } void Free(void){} // ******************************************************************* void defGraph::initGraphNodes(MeshSTD &srcMesh, int mode){ hipMallocManaged(&nodes, sizeof(defGraphNode) * nodeNum); if(KDTREE == false){ for(int cnt=0; cnt < nodeNum; cnt++){ hipMallocManaged(&nodes[cnt].nIds, sizeof(int) * nodeNum); hipMallocManaged(&nodes[cnt].nWeights, sizeof(float) * nodeNum); hipMallocManaged(&nodes[cnt].nDistances, sizeof(float) * nodeNum); } }else{ // todo..... KDTree...!!! /// issue............. 
for(int cnt=0; cnt < nodeNum; cnt++){ hipMallocManaged(&nodes[cnt].nIds, sizeof(int) * nodeNum); hipMallocManaged(&nodes[cnt].nWeights, sizeof(float) * nodeNum); hipMallocManaged(&nodes[cnt].nDistances, sizeof(float) * nodeNum); } } hipDeviceSynchronize(); // mesh vertices to initialize graph nodes sampleDownMeshHost(srcMesh.verticesNum, mode); // initialize nodes dual-quaternions to identity for (size_t i = 0; i < nodeNum; i++){ nodes[i].dq = math::dualQuat::identity(); } } void defGraph::Free(void){ hipDeviceSynchronize(); if(KDTREE){ hipFree(graphKDTree); } for(int cnt=0; cnt < NODE_NUM; cnt++){ hipFree(nodes[cnt].nIds); hipFree(nodes[cnt].nWeights); hipFree(nodes[cnt].nDistances); } hipFree(nodes); hipFree(visibleNodeIds); hipFree(visibleNWeights); hipFree(visibleNDistances); } /*********************Sort and Wight nodes************************/ __global__ void updateActiveNodesWeightsKernel(defGraph &graph, int verticesNum){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = verticesNum; for (int idx = index; idx < size; idx += stride){ for(int w = 0; w < KNN; w++){ if(graph.visibleNodeIds[idx] == -1)continue; int nidx = idx * graph.visibleNodesNum + w; float ref = dgw; // float ref = dgw * graph.visibleNDistances[idx * graph.visibleNodesNum]; // supposed distance[0] contains leasts distance after sorting if(expWeight){ graph.visibleNWeights[nidx] = exp(-pow(graph.visibleNDistances[nidx],2) / pow(ref,2)); }else{ graph.visibleNWeights[nidx] = graph.visibleNDistances[idx * graph.nodeNum] * dgw / graph.visibleNDistances[nidx]; } } // if(idx == 1) // for(int cnt=0; cnt< graph.visibleNodesNum; cnt++){ // int nId = idx * graph.visibleNodesNum + cnt; // printf("nIds: %d, dist: %f, weight: %f \n", graph.visibleNodeIds[nId], graph.visibleNDistances[nId], graph.visibleNWeights[nId]); // } } } __global__ void sortActiveNodesKernel(defGraph &graph, int verticesNum){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = verticesNum; for (int idx = index; idx < size; idx += stride){ // Go through all neighbour points for (int n = 0; n < graph.visibleNodesNum -1; n++) { if(graph.visibleNodeIds[n] == -1)break; int nIdx = idx * graph.visibleNodesNum + n; // Store current distance and associated nIdx float currDist = graph.visibleNDistances[nIdx]; int currIndex = graph.visibleNodeIds[nIdx]; // Shift values (and indexes) higher int j = nIdx; float tmp_dist = 0; int tmp_index = 0; while (j > idx * graph.visibleNodesNum && graph.visibleNDistances[j-1] > currDist) { tmp_dist = graph.visibleNDistances[j-1]; tmp_index = graph.visibleNodeIds[j-1]; graph.visibleNDistances[j-1] = currDist; graph.visibleNodeIds[j-1] = currIndex; graph.visibleNDistances[j] = tmp_dist; graph.visibleNodeIds[j] = tmp_index; --j; } } } } /*********************Graph to target Mesh************************/ __global__ void updateActiveNodesDistnacesKernel(defGraph &graph, MeshSTD &targetMesh, float4x4 cuPose){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = targetMesh.verticesNum; for (int idx = index; idx < size; idx += stride){ // invoking target vertex 3D position from target mesh geometry::PointXYZ vi = targetMesh.vertices[idx].position; // todo... does it need transformation ? 
int nIdx = idx * graph.visibleNodesNum; for(int n = 0; n < graph.visibleNodesNum; n++){ // non-visible node indices are filled with -1 if(graph.visibleNodeIds[n] == -1 || n == idx)continue; // invoking neighbour node j vertex position from degGraph geometry::PointXYZ vj = graph.nodes[n].vertex.position; // computing distance between target vertex vi and j-th neighbour node(joint) position float tmp_dist = distance(vi, vj); // excluding absolute 0.0 to avoids nan and inf products if(tmp_dist == 0.0) tmp_dist = 1e-5; // storing distance and id of the neighbour in target node struct graph.visibleNDistances[nIdx] = tmp_dist; graph.visibleNodeIds[nIdx] = n; // index of targeted vertex in the array nIdx++; } } } __global__ void updateNodeWeightsKDTreeKernel(defGraph& graph, kdTree &kdtree){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // make a copy of KDtree in thread registers... kdTree tmpkdtree = kdtree; // graph node to KDtree node type struct kdNode currNode = {idx, {graph.nodes[idx].vertex.position.x, graph.nodes[idx].vertex.position.y, graph.nodes[idx].vertex.position.z}}; // // finding closest neighbors in KDtree structure tmpkdtree.findKNN(currNode); printf("searching for (%g, %g, %g)\n" "found (%g, %g, %g) dist %g\n ID: %d, seen %d nodes\n", currNode.x[0], currNode.x[1], currNode.x[2], tmpkdtree.kdFound->x[0], tmpkdtree.kdFound->x[1], tmpkdtree.kdFound->x[2], sqrt(tmpkdtree.kdDistnaces[0]), tmpkdtree.kdFound->id, tmpkdtree.visited); // for(int w = 0; w < graph.nNum; w++){ // float ref = dgw * tmpkdtree.VisitedNodes[0].distance; // // supposed distance[0] contains leasts distance after sorting // graph.nodes[idx].nWeights[w] = exp(-pow(tmpkdtree.VisitedNodes[w].distance,2) / pow(ref,2)); // } // if(idx == 10) // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("dist: %f, ids: %d, W: %f\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt],graph.nodes[idx].nWeights[cnt]); // } } } void defGraph::updateActiveNeighbourNodes(MeshSTD &targetMesh, float4x4 cuPose){ //load active nodes visibleNodesNum = nodeNum; if(!KDTREE){ // update Euclidian distnaces between vertices and nodes int threads_per_block = 1024; int thread_blocks =(targetMesh.verticesNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateActiveNodesDistnaces >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", visibleNodesNum: " << visibleNodesNum << // std::endl; hipLaunchKernelGGL(( updateActiveNodesDistnacesKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, targetMesh, cuPose); hipDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, visibleNDistances[cnt], visibleNodeIds[cnt]); // } // std::cout << "<<< sortActiveNodes >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; hipLaunchKernelGGL(( sortActiveNodesKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, targetMesh.verticesNum); hipDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, visibleNDistances[cnt], visibleNodeIds[cnt]); // } // std::cout << "<<< updateActiveNodesWeights >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; hipLaunchKernelGGL(( updateActiveNodesWeightsKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, 
targetMesh.verticesNum); hipDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, weight: %f, ID: %d \n", cnt, visibleNDistances[cnt], visibleNWeights[cnt], visibleNodeIds[cnt]); // } }else{ // build KDtree for input mesh "*defGraphMesh" -> is done init Function // int threads_per_block = 512; // int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKDTreeKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; // updateNodeWeightsKDTreeKernel <<< thread_blocks , threads_per_block >>>(*this, *graphKDTree); kdTree *kdtree_h = new kdTree; hipMemcpy(kdtree_h, graphKDTree, sizeof(kdTree), hipMemcpyDeviceToHost); hipDeviceSynchronize(); updateActiveNodesWeightsKDTree(*kdtree_h, targetMesh, cuPose); // for(int cnt = 0; cnt < targetMesh.verticesNum; cnt ++){ // for(int j =0; j<nNum; j++){ // int nidx = cnt * nNum + j; // printf("id: %d, j:%d, %d, %f, %f \n", cnt, j , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); // } // } } } /*********************Graph to depth Image************************/ __global__ void updateActiveNodesDistnacesKernel(defGraph& graph, float* targetdepth, rgbdSensor sensor, float4x4 cuPose){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = sensor.rows * sensor.cols; for (int idx = index; idx < size; idx += stride){ // invoking target pixel 3D position from depth image geometry::PointXYZ vi = getPoint3d(idx, targetdepth[idx], sensor); // todo... does it need transformation??? for(int n = 0; n < graph.visibleNodesNum; n++){ // non-visible node indices are filled with -1 if(graph.visibleNodeIds[n] == -1)break; // invoking neighbour node j vertex position from degGraph geometry::PointXYZ vj = graph.nodes[n].vertex.position; // computing distance between target vertex vi and j-th neighbour node(joint) position float tmp_dist = distance(vi, vj); // excluding absolute 0.0 to avoids nan and inf products if(tmp_dist == 0.0) tmp_dist = 1e-5; // // index of targeted vertex in the array int nIdx = idx * graph.visibleNodesNum + n; // storing distance and id of the neighbour in target node struct graph.visibleNDistances[nIdx] = tmp_dist; graph.visibleNodeIds[nIdx] = n; } // if(idx == 0) { // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("idx: %d, cnt: %d, dist: %f\n",idx, cnt, graph.visibleNDistances[idx * graph.nodeNum + cnt]); // } // } } } void defGraph::updateActiveNeighbourNodes(pyramid &targetImage, rgbdSensor sensor, float4x4 cuPose){ //load active nodes visibleNodesNum = nodeNum; if(!KDTREE){ // update Euclidian distnaces between vertices and nodes int threads_per_block = 1024; int thread_blocks =(targetImage.sensor.rows * targetImage.sensor.cols + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateActiveNodesDistnaces >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", visibleNodesNum: " << visibleNodesNum << // ", rows: " << targetImage.sensor.rows << // ", cols: " << targetImage.sensor.cols << // std::endl; hipLaunchKernelGGL(( updateActiveNodesDistnacesKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, targetImage.depth, targetImage.sensor, cuPose); hipDeviceSynchronize(); // std::cout << "<<< sortActiveNodes >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", rows: " << targetImage.sensor.rows << // ", cols: " << 
targetImage.sensor.cols << // std::endl; hipLaunchKernelGGL(( sortActiveNodesKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, targetImage.sensor.rows * targetImage.sensor.cols); hipDeviceSynchronize(); // std::cout << "<<< updateActiveNodesWeights >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", rows: " << targetImage.sensor.rows << // ", cols: " << targetImage.sensor.cols << // std::endl; hipLaunchKernelGGL(( updateActiveNodesWeightsKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, targetImage.sensor.rows * targetImage.sensor.cols); hipDeviceSynchronize(); }else{ // build KDtree for input mesh "*defGraphMesh" -> is done init Function // int threads_per_block = 512; // int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKDTreeKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; // updateNodeWeightsKDTreeKernel <<< thread_blocks , threads_per_block >>>(*this, *graphKDTree); kdTree *kdtree_h = new kdTree; hipMemcpy(kdtree_h, graphKDTree, sizeof(kdTree), hipMemcpyDeviceToHost); hipDeviceSynchronize(); updateActiveNodesWeightsKDTree(*kdtree_h, targetImage, sensor, cuPose); // int size = sensor.rows * sensor.cols; // for(int cnt = 0; cnt < size; cnt ++){ // for(int w =0; w < nNum; w++){ // int nidx = cnt * nNum + w; // printf("id: %d, jw:%d, %d, %f, %f \n", cnt, w , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); // } // } } } /*******************In graph Connections**************************/ __global__ void updateNodeWeightsKernel(defGraph& graph){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // In case of using radius reach for the neighborhood, this parameter will show number of close nodes for(int w = 0; w < graph.nNum; w++){ float ref = dgw; // float ref = dgw * graph.nodes[idx].nDistances[0]; if(graph.nodes[idx].nDistances[w] == 0.0f)continue; // supposed distance[0] contains leasts distance after sorting if(expWeight){ graph.nodes[idx].nWeights[w] = exp(-pow(graph.nodes[idx].nDistances[w],2) / pow(ref,2)); }else{ graph.nodes[idx].nWeights[w] = graph.nodes[idx].nDistances[0] * dgw / graph.nodes[idx].nDistances[w]; } } // if(idx == 10) // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("dist: %f, ids: %d, W: %f\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt],graph.nodes[idx].nWeights[cnt]); // } } } __global__ void updateNodeDistnacesKernel(defGraph& graph){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // invoking target node vertex position from degGraph geometry::Vertex vi = graph.nodes[idx].vertex; int nIdx = 0; for(int n = 0; n < graph.nodeNum; n++){ // shouldn't add node itself as a neighbour in neighbour list if(n == idx) continue; // invoking neighbour node j vertex position from degGraph geometry::Vertex vj = graph.nodes[n].vertex; // computing distance between target node vi and i-th neighbour vertex position float tmp_dist = distance(vi.position, vj.position); // excluding absolute 0.0 to avoid nan and inf products if(tmp_dist < 10e-5) tmp_dist = 10e-5; // storing distance and id of the neighbour in target node struct graph.nodes[idx].nDistances[nIdx] = tmp_dist; graph.nodes[idx].nIds[nIdx] = n; nIdx++; 
} } } __global__ void sortNeighbourNodesKernel(defGraph& graph){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // Go through all neighbour points for (int i = 1; i < graph.nodeNum -1; i++) { // Store current distance and associated index float currDist = graph.nodes[idx].nDistances[i]; int currIndex = graph.nodes[idx].nIds[i]; // Shift values (and indexes) higher that the current distance to the right int j = i; float tmp_dist = 0; int tmp_index = 0; while (j > 0 && graph.nodes[idx].nDistances[j-1] > currDist) { tmp_dist = graph.nodes[idx].nDistances[j-1]; tmp_index = graph.nodes[idx].nIds[j-1]; graph.nodes[idx].nDistances[j-1] = currDist; graph.nodes[idx].nIds[j-1] = currIndex; graph.nodes[idx].nDistances[j] = tmp_dist; graph.nodes[idx].nIds[j] = tmp_index; --j; } } // if(idx == 10) // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("dist: %f, ids: %d\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt]); // } } } void defGraph::defGraphUpdateNodes(void){ if(!KDTREE){ // build KDtree for input mesh "*defGraphMesh" // find KNN for each vertex in mesh // update Euclidian distnaces between vertices and nodes int threads_per_block = 1024; int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeDistnacesKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", nodeNum: " << nodeNum << // std::endl; hipLaunchKernelGGL(( updateNodeDistnacesKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this); hipDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nIds[cnt]); // } // Sort vertices based on their distances threads_per_block = 512; thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< sortNeighbourNodesKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; hipLaunchKernelGGL(( sortNeighbourNodesKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this); hipDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nIds[cnt]); // } threads_per_block = 512; thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; hipLaunchKernelGGL(( updateNodeWeightsKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this); hipDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, weight: %f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nWeights[cnt], nodes[0].nIds[cnt]); // } }else{ // Update GraphStructure with KDtree - beforehand call GraphKDtreeInit ... 
// build KDtree for input mesh "*defGraphMesh" -> is done init Function // int threads_per_block = 512; // int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKDTreeKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; // updateNodeWeightsKDTreeKernel <<< thread_blocks , threads_per_block >>>(*this, *graphKDTree); kdTree *kdtree_h = new kdTree; hipMemcpy(kdtree_h, graphKDTree, sizeof(kdTree), hipMemcpyDeviceToHost); hipDeviceSynchronize(); updateNodeWeightsKDTree(*kdtree_h); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, Dist: %f, weight:%f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nWeights[cnt], nodes[0].nIds[cnt]); // } } } /*****************************************************************/ void defGraph::updateNodeWeightsKDTree(kdTree &kdtree){ defGraphNode *tmpnodes = new defGraphNode[NODE_NUM]; // copy Query KDtree to Device memory hipMemcpy(tmpnodes, this->nodes, sizeof(defGraphNode) * NODE_NUM, hipMemcpyDeviceToHost); hipDeviceSynchronize(); for (int idx = 0; idx < nodeNum; idx ++){ // graph node to KDtree node type struct kdNode currNode = {idx, {tmpnodes[idx].vertex.position.x, tmpnodes[idx].vertex.position.y, tmpnodes[idx].vertex.position.z}}; // finding closest neighbors in KDtree structure kdtree.findKNN(currNode); if(kdtree.VisitedNodes[0].distance < 1e-5)kdtree.VisitedNodes[0].distance = 1e-5; for(int w = 0; w < nNum; w++){ float ref = dgw; // float ref = NODE_NUM/dgw * kdtree.VisitedNodes[0].distance; // supposed distance[0] contains leasts distance after sorting if(kdtree.VisitedNodes[w].distance == 0.0f)continue; if(expWeight){ nodes[idx].nWeights[w] = exp(-pow(kdtree.VisitedNodes[w].distance, 2) / pow(ref,2)); }else{ nodes[idx].nWeights[w] = kdtree.VisitedNodes[0].distance * dgw / kdtree.VisitedNodes[w].distance; } nodes[idx].nDistances[w] = kdtree.VisitedNodes[w].distance; nodes[idx].nIds[w] = kdtree.VisitedNodes[w].id; } } } void defGraph::updateActiveNodesWeightsKDTree(kdTree &kdtree, MeshSTD &targetMesh, float4x4 cuPose){ for (int idx = 0; idx < targetMesh.verticesNum; idx ++){ // graph node to KDtree node type struct kdNode currNode = {idx, {targetMesh.vertices[idx].position.x, targetMesh.vertices[idx].position.y, targetMesh.vertices[idx].position.z}}; // finding closest neighbors in KDtree structure kdtree.findKNN(currNode); for(int w = 0; w < nNum; w++){ int nidx = idx * nNum + w; float ref = dgw; // float ref = dgw * kdtree.VisitedNodes[0].distance; if(expWeight){ visibleNWeights[nidx] = exp(-pow(kdtree.VisitedNodes[w].distance,2) / pow(ref,2)); }else{ visibleNWeights[nidx] = kdtree.VisitedNodes[0].distance * dgw / kdtree.VisitedNodes[w].distance; } visibleNDistances[nidx] = kdtree.VisitedNodes[w].distance; visibleNodeIds[nidx] = kdtree.VisitedNodes[w].id; // printf("id: %d, w:%d, %d, %f, %f \n", idx, w , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); } } } void defGraph::updateActiveNodesWeightsKDTree(kdTree &kdtree, pyramid &targetImage, rgbdSensor sensor, float4x4 cuPose){ int size = sensor.rows * sensor.cols; for (int idx = 0; idx < size; idx ++){ // graph node to KDtree node type // todo... check the range again !!!! 
if(targetImage.depth[idx] < DEPTH_MAX && targetImage.depth[idx] > DEPTH_MIN){ geometry::PointXYZ vi = getPoint3d(idx, targetImage.depth[idx], sensor); struct kdNode currNode = {idx, {vi.x, vi.y, vi.z}}; // finding closest neighbors in KDtree structure kdtree.findKNN(currNode); for(int w = 0; w < nNum; w++){ int nidx = idx * nNum + w; float ref = dgw; // float ref = dgw * kdtree.VisitedNodes[0].distance; if(expWeight){ visibleNWeights[nidx] = exp(-pow(kdtree.VisitedNodes[w].distance,2) / pow(ref,2)); }else{ visibleNWeights[nidx] = kdtree.VisitedNodes[0].distance * dgw / kdtree.VisitedNodes[w].distance; } visibleNDistances[nidx] = kdtree.VisitedNodes[w].distance; visibleNodeIds[nidx] = kdtree.VisitedNodes[w].id; // printf("id: %d, w:%d, %d, %f, %f \n", idx, w , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); } } } } /*****************************************************************/ void defGraph::randomNum(int randNums[], int elements, int range){ for (int i = 0; i < elements; i++){ bool same; do{ same = false; randNums[i] = rand() % range; // Check if the newly generated number is a duplicate: for (int check = 0; check < i; check++){ if (randNums[i] == randNums[check]){ same = true; break; } } } while (same); } } void defGraph::sampleDownMeshDeice(void){ float *randDevData,*hostData; int n = NODE_NUM; hipMalloc((void **)&randDevData, n * sizeof(float)); hostData = (float *)calloc(n, sizeof(float)); /* initialize random seed: */ hiprandGenerator_t gen; /* Create pseudo-random number generator */ hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); /* Set seed */ hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL); /* Generate n floats on device */ hiprandGenerateUniform(gen, randDevData, n); /* Copy device memory to host */ hipMemcpy(hostData, randDevData, n * sizeof(float),hipMemcpyDeviceToHost); // update node ids querry (subsampling origianl mesh to the max number of nodes "NODE_NUM") // take fisrt n nodes as deformation graph nodes for(int cnt=0; cnt < NODE_NUM; cnt++){ int num = static_cast<int>(hostData[cnt] * this->defGraphMesh.verticesNum); this->nodes[cnt].id = cnt; this->nodes[cnt].vertex = this->defGraphMesh.vertices[num]; this->nodes[cnt].vertexID = num; // printf("%d: %d \n",cnt, num); } hiprandDestroyGenerator(gen); } void defGraph::sampleDownMeshHost(int obsNum, int mode){ // update node ids querry (subsampling origianl mesh to the max number of nodes "NODE_NUM") // take fisrt n nodes as deformation graph nodes // image or random nodes assignment std::cout << "activeNodesNum: "<< activeNodesNum << ", mode: " << mode << std::endl; if(mode == 0){ //declare array int *nodeIds_rand; //random number generator srand(static_cast<int>(time(0))); nodeIds_rand = new int[activeNodesNum]; randomNum(nodeIds_rand, activeNodesNum, obsNum); for(int cnt = 0; cnt < activeNodesNum; cnt++){ this->nodes[cnt].id = cnt; this->nodes[cnt].vertex = this->defGraphMesh.vertices[nodeIds_rand[cnt]]; this->nodes[cnt].vertexID = nodeIds_rand[cnt]; // std::cout << this->nodes[cnt].id << ", " << this->nodes[cnt].vertexID << std::endl; } }else{ // identical nodes as mesh vertices in order of mesh for(int cnt=0; cnt < activeNodesNum; cnt++){ this->nodes[cnt].id = cnt; this->nodes[cnt].vertex = this->defGraphMesh.vertices[cnt]; this->nodes[cnt].vertexID = cnt; // std::cout << this->nodes[cnt].id << ", " << this->nodes[cnt].vertexID << std::endl; } } } /*****************************************************************/ __global__ void updateNodesDQKernel(defGraph& graph, float* 
dqVector){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; math::EulerAngles EA; for (int idx = index; idx < size; idx += stride){ EA.roll = dqVector[idx * 6 + 0]; EA.pitch = dqVector[idx * 6 + 1]; EA.yaw = dqVector[idx * 6 + 2]; graph.nodes[idx].dq = math::dualQuat(math::Quaternion(EA), make_float3(dqVector[idx * 6 + 3], dqVector[idx * 6 + 4], dqVector[idx * 6 + 5])); // printf("%f, %f, %f, %f, %f, %f \n", EA.roll, EA.pitch, EA.yaw ,dqVector[idx * 6 + 3], dqVector[idx * 6 + 4],dqVector[idx * 6 + 5]); } } void defGraph::updateNodesDQ(float* dqVector){ // update Euclidian distnaces between nodes int threads_per_block = 512; int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodesDQ >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; hipLaunchKernelGGL(( updateNodesDQKernel), dim3(thread_blocks), dim3(threads_per_block), 0, 0, *this, dqVector); hipDeviceSynchronize(); } void defGraph::writeDefGraphToFile(const char* fileName, int obsNum, int mode){ defGraphNode *h_nodes = new defGraphNode[NODE_NUM]; hipMemcpy(h_nodes, this->nodes, sizeof(defGraphNode) * NODE_NUM, hipMemcpyDeviceToHost); hipDeviceSynchronize(); std::ofstream file(fileName,std::ios::ate); if (file.is_open()){ if(mode == 0){ file << fileName <<", NODE_NUM: " << NODE_NUM << std::endl; for (size_t i = 0; i < nodeNum; i++){ file << "[ " << std::endl; file << "id: " << h_nodes[i].id << std::endl; file << "nNum: " << nNum << std::endl; file << "nodeIds: " ; for(int j = 0; j < nNum; j++){ file << h_nodes[i].nIds[j] << " "; } file << std::endl; file << "VertexPose: " << h_nodes[i].vertex.position.x << " " << h_nodes[i].vertex.position.y << " " << h_nodes[i].vertex.position.z << " " << std::endl; file << "VertexNormal: " << h_nodes[i].vertex.normal.x << " " << h_nodes[i].vertex.normal.y << " " << h_nodes[i].vertex.normal.z << " " << std::endl; file << "VertexColor: " << h_nodes[i].vertex.color.x << " " << h_nodes[i].vertex.color.y << " " << h_nodes[i].vertex.color.z << " " << std::endl; file << "dq: " << h_nodes[i].dq << std::endl; file << "nDistances: " ; for(int j = 0; j < nNum; j++){ file << h_nodes[i].nDistances[j] << " "; } file << std::endl; file << "nWeights: " ; for(int j = 0; j < nNum; j++){ file << h_nodes[i].nWeights[j] << " "; } file << std::endl; file << "]," << std::endl; } }else if(mode == 1){ // for(int cnt = 0; cnt < obsNum; cnt ++){ // for(int j =0; j<nNum; j++){ // int nidx = cnt * nNum + j; // printf("xcid: %d, j:%d, %d, %f, %f \n", cnt, j , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); // } // } file << fileName <<", ACTIVE_NODE_NUM: " << visibleNodesNum << ", ObsNum: " << obsNum<< std::endl; int i = 0; while(true){ int index = i * KNN; file << "[ " << std::endl; file << "pixelID/vertexID: " << i << std::endl; file << "visibleNodeIds: " ; for(int j = 0; j < nNum; j++){ file << this->visibleNodeIds[index + j] << " "; } file << std::endl; file << "visibleNDistances: " ; for(int j = 0; j < nNum; j++){ file << this->visibleNDistances[index + j] << " "; } file << std::endl; file << "visibleNWeights: " ; for(int j = 0; j < nNum; j++){ file << this->visibleNWeights[index + j] << " "; } file << std::endl; file << "]," << std::endl; i++; if(i >= obsNum || i >= 500)break; } } file.close(); } else std::cout << "Unable to open file"; delete[] h_nodes; } } // namespace geometry } // namespace DynaMap
e3f512827c1ad07a3bf4f2861a3dd4925955c5c3.cu
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % by: Alireza Ahmadi % % University of Bonn- MSc Robotics & Geodetic Engineering% % [email protected] % % AlirezaAhmadi.xyz % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ #include "defGraph.h" namespace DynaMap{ namespace geometry{ defGraph::defGraph(void){} defGraph::~defGraph(void){ Free(); } void defGraph::init(MeshSTD &srcMesh, int ObservationNum, int mode){ nodeNum = NODE_NUM; activeNodesNum = (nodeNum > ObservationNum) ? ObservationNum : nodeNum; defGraphMesh = srcMesh; nNum = KNN; visibleNodesNum = activeNodesNum; initGraphNodes(srcMesh, mode); if(!KDTREE){ cudaMallocManaged(&visibleNodeIds, sizeof(int) * ObservationNum * nodeNum); cudaMallocManaged(&visibleNWeights, sizeof(float) * ObservationNum * nodeNum); cudaMallocManaged(&visibleNDistances, sizeof(float) * ObservationNum * nodeNum); cudaDeviceSynchronize(); }else{ cudaMallocManaged(&graphKDTree, sizeof(kdTree)); cudaDeviceSynchronize(); graphKDTree->init(nodeNum); // allocating Query nodes on Host memory struct kdNode *kdQuery_h; kdQuery_h =(struct kdNode*) calloc(nodeNum, sizeof(struct kdNode)); // loading KDtree Query nodes from Graph Nodes for(int n = 0; n < nodeNum; n++){ kdQuery_h[n].id = n; kdQuery_h[n].x[0] = nodes[n].vertex.position.x; kdQuery_h[n].x[1] = nodes[n].vertex.position.y; kdQuery_h[n].x[2] = nodes[n].vertex.position.z; // std::cout << kdQuery[n].id << ", "<<kdQuery[n].x[0] << ", " << kdQuery[n].x[1] << ", " <<kdQuery[n].x[2] << std::endl; } // copy Query KDtree to Device memory cudaMemcpy(graphKDTree->kdQuery, kdQuery_h, sizeof(struct kdNode) * nodeNum, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); // allocating Root of KDTree on Host memory struct kdNode *kdRoot_h; kdRoot_h = (struct kdNode*) calloc(nodeNum, sizeof(struct kdNode)); // build DKTree on Host memory kdRoot_h = graphKDTree->buildTree(kdQuery_h, nodeNum, 0, 3); // copy KDtree to Device memory cudaMemcpy(graphKDTree->kdRoot, kdRoot_h, sizeof(struct kdNode) * nodeNum, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); cudaMallocManaged(&visibleNodeIds, sizeof(int) * ObservationNum * nNum); cudaMallocManaged(&visibleNWeights, sizeof(float) * ObservationNum * nNum); cudaMallocManaged(&visibleNDistances, sizeof(float) * ObservationNum * nNum); cudaDeviceSynchronize(); } std::cout << "Graph nodeNum: " << nodeNum << ", KDtree: " << KDTREE << std::endl; } void Free(void){} // ******************************************************************* void defGraph::initGraphNodes(MeshSTD &srcMesh, int mode){ cudaMallocManaged(&nodes, sizeof(defGraphNode) * nodeNum); if(KDTREE == false){ for(int cnt=0; cnt < nodeNum; cnt++){ cudaMallocManaged(&nodes[cnt].nIds, sizeof(int) * nodeNum); cudaMallocManaged(&nodes[cnt].nWeights, sizeof(float) * nodeNum); cudaMallocManaged(&nodes[cnt].nDistances, sizeof(float) * nodeNum); } }else{ // todo..... KDTree...!!! /// issue............. 
for(int cnt=0; cnt < nodeNum; cnt++){ cudaMallocManaged(&nodes[cnt].nIds, sizeof(int) * nodeNum); cudaMallocManaged(&nodes[cnt].nWeights, sizeof(float) * nodeNum); cudaMallocManaged(&nodes[cnt].nDistances, sizeof(float) * nodeNum); } } cudaDeviceSynchronize(); // mesh vertices to initialize graph nodes sampleDownMeshHost(srcMesh.verticesNum, mode); // initialize nodes dual-quaternions to identity for (size_t i = 0; i < nodeNum; i++){ nodes[i].dq = math::dualQuat::identity(); } } void defGraph::Free(void){ cudaDeviceSynchronize(); if(KDTREE){ cudaFree(graphKDTree); } for(int cnt=0; cnt < NODE_NUM; cnt++){ cudaFree(nodes[cnt].nIds); cudaFree(nodes[cnt].nWeights); cudaFree(nodes[cnt].nDistances); } cudaFree(nodes); cudaFree(visibleNodeIds); cudaFree(visibleNWeights); cudaFree(visibleNDistances); } /*********************Sort and Wight nodes************************/ __global__ void updateActiveNodesWeightsKernel(defGraph &graph, int verticesNum){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = verticesNum; for (int idx = index; idx < size; idx += stride){ for(int w = 0; w < KNN; w++){ if(graph.visibleNodeIds[idx] == -1)continue; int nidx = idx * graph.visibleNodesNum + w; float ref = dgw; // float ref = dgw * graph.visibleNDistances[idx * graph.visibleNodesNum]; // supposed distance[0] contains leasts distance after sorting if(expWeight){ graph.visibleNWeights[nidx] = exp(-pow(graph.visibleNDistances[nidx],2) / pow(ref,2)); }else{ graph.visibleNWeights[nidx] = graph.visibleNDistances[idx * graph.nodeNum] * dgw / graph.visibleNDistances[nidx]; } } // if(idx == 1) // for(int cnt=0; cnt< graph.visibleNodesNum; cnt++){ // int nId = idx * graph.visibleNodesNum + cnt; // printf("nIds: %d, dist: %f, weight: %f \n", graph.visibleNodeIds[nId], graph.visibleNDistances[nId], graph.visibleNWeights[nId]); // } } } __global__ void sortActiveNodesKernel(defGraph &graph, int verticesNum){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = verticesNum; for (int idx = index; idx < size; idx += stride){ // Go through all neighbour points for (int n = 0; n < graph.visibleNodesNum -1; n++) { if(graph.visibleNodeIds[n] == -1)break; int nIdx = idx * graph.visibleNodesNum + n; // Store current distance and associated nIdx float currDist = graph.visibleNDistances[nIdx]; int currIndex = graph.visibleNodeIds[nIdx]; // Shift values (and indexes) higher int j = nIdx; float tmp_dist = 0; int tmp_index = 0; while (j > idx * graph.visibleNodesNum && graph.visibleNDistances[j-1] > currDist) { tmp_dist = graph.visibleNDistances[j-1]; tmp_index = graph.visibleNodeIds[j-1]; graph.visibleNDistances[j-1] = currDist; graph.visibleNodeIds[j-1] = currIndex; graph.visibleNDistances[j] = tmp_dist; graph.visibleNodeIds[j] = tmp_index; --j; } } } } /*********************Graph to target Mesh************************/ __global__ void updateActiveNodesDistnacesKernel(defGraph &graph, MeshSTD &targetMesh, float4x4 cuPose){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = targetMesh.verticesNum; for (int idx = index; idx < size; idx += stride){ // invoking target vertex 3D position from target mesh geometry::PointXYZ vi = targetMesh.vertices[idx].position; // todo... does it need transformation ? 
int nIdx = idx * graph.visibleNodesNum; for(int n = 0; n < graph.visibleNodesNum; n++){ // non-visible node indices are filled with -1 if(graph.visibleNodeIds[n] == -1 || n == idx)continue; // invoking neighbour node j vertex position from degGraph geometry::PointXYZ vj = graph.nodes[n].vertex.position; // computing distance between target vertex vi and j-th neighbour node(joint) position float tmp_dist = distance(vi, vj); // excluding absolute 0.0 to avoids nan and inf products if(tmp_dist == 0.0) tmp_dist = 1e-5; // storing distance and id of the neighbour in target node struct graph.visibleNDistances[nIdx] = tmp_dist; graph.visibleNodeIds[nIdx] = n; // index of targeted vertex in the array nIdx++; } } } __global__ void updateNodeWeightsKDTreeKernel(defGraph& graph, kdTree &kdtree){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // make a copy of KDtree in thread registers... kdTree tmpkdtree = kdtree; // graph node to KDtree node type struct kdNode currNode = {idx, {graph.nodes[idx].vertex.position.x, graph.nodes[idx].vertex.position.y, graph.nodes[idx].vertex.position.z}}; // // finding closest neighbors in KDtree structure tmpkdtree.findKNN(currNode); printf("searching for (%g, %g, %g)\n" "found (%g, %g, %g) dist %g\n ID: %d, seen %d nodes\n", currNode.x[0], currNode.x[1], currNode.x[2], tmpkdtree.kdFound->x[0], tmpkdtree.kdFound->x[1], tmpkdtree.kdFound->x[2], sqrt(tmpkdtree.kdDistnaces[0]), tmpkdtree.kdFound->id, tmpkdtree.visited); // for(int w = 0; w < graph.nNum; w++){ // float ref = dgw * tmpkdtree.VisitedNodes[0].distance; // // supposed distance[0] contains leasts distance after sorting // graph.nodes[idx].nWeights[w] = exp(-pow(tmpkdtree.VisitedNodes[w].distance,2) / pow(ref,2)); // } // if(idx == 10) // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("dist: %f, ids: %d, W: %f\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt],graph.nodes[idx].nWeights[cnt]); // } } } void defGraph::updateActiveNeighbourNodes(MeshSTD &targetMesh, float4x4 cuPose){ //load active nodes visibleNodesNum = nodeNum; if(!KDTREE){ // update Euclidian distnaces between vertices and nodes int threads_per_block = 1024; int thread_blocks =(targetMesh.verticesNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateActiveNodesDistnaces >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", visibleNodesNum: " << visibleNodesNum << // std::endl; updateActiveNodesDistnacesKernel<<<thread_blocks, threads_per_block>>>(*this, targetMesh, cuPose); cudaDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, visibleNDistances[cnt], visibleNodeIds[cnt]); // } // std::cout << "<<< sortActiveNodes >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; sortActiveNodesKernel<<<thread_blocks, threads_per_block>>>(*this, targetMesh.verticesNum); cudaDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, visibleNDistances[cnt], visibleNodeIds[cnt]); // } // std::cout << "<<< updateActiveNodesWeights >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; updateActiveNodesWeightsKernel<<<thread_blocks, threads_per_block>>>(*this, targetMesh.verticesNum); cudaDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, 
weight: %f, ID: %d \n", cnt, visibleNDistances[cnt], visibleNWeights[cnt], visibleNodeIds[cnt]); // } }else{ // build KDtree for input mesh "*defGraphMesh" -> is done init Function // int threads_per_block = 512; // int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKDTreeKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; // updateNodeWeightsKDTreeKernel <<< thread_blocks , threads_per_block >>>(*this, *graphKDTree); kdTree *kdtree_h = new kdTree; cudaMemcpy(kdtree_h, graphKDTree, sizeof(kdTree), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); updateActiveNodesWeightsKDTree(*kdtree_h, targetMesh, cuPose); // for(int cnt = 0; cnt < targetMesh.verticesNum; cnt ++){ // for(int j =0; j<nNum; j++){ // int nidx = cnt * nNum + j; // printf("id: %d, j:%d, %d, %f, %f \n", cnt, j , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); // } // } } } /*********************Graph to depth Image************************/ __global__ void updateActiveNodesDistnacesKernel(defGraph& graph, float* targetdepth, rgbdSensor sensor, float4x4 cuPose){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = sensor.rows * sensor.cols; for (int idx = index; idx < size; idx += stride){ // invoking target pixel 3D position from depth image geometry::PointXYZ vi = getPoint3d(idx, targetdepth[idx], sensor); // todo... does it need transformation??? for(int n = 0; n < graph.visibleNodesNum; n++){ // non-visible node indices are filled with -1 if(graph.visibleNodeIds[n] == -1)break; // invoking neighbour node j vertex position from degGraph geometry::PointXYZ vj = graph.nodes[n].vertex.position; // computing distance between target vertex vi and j-th neighbour node(joint) position float tmp_dist = distance(vi, vj); // excluding absolute 0.0 to avoids nan and inf products if(tmp_dist == 0.0) tmp_dist = 1e-5; // // index of targeted vertex in the array int nIdx = idx * graph.visibleNodesNum + n; // storing distance and id of the neighbour in target node struct graph.visibleNDistances[nIdx] = tmp_dist; graph.visibleNodeIds[nIdx] = n; } // if(idx == 0) { // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("idx: %d, cnt: %d, dist: %f\n",idx, cnt, graph.visibleNDistances[idx * graph.nodeNum + cnt]); // } // } } } void defGraph::updateActiveNeighbourNodes(pyramid &targetImage, rgbdSensor sensor, float4x4 cuPose){ //load active nodes visibleNodesNum = nodeNum; if(!KDTREE){ // update Euclidian distnaces between vertices and nodes int threads_per_block = 1024; int thread_blocks =(targetImage.sensor.rows * targetImage.sensor.cols + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateActiveNodesDistnaces >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", visibleNodesNum: " << visibleNodesNum << // ", rows: " << targetImage.sensor.rows << // ", cols: " << targetImage.sensor.cols << // std::endl; updateActiveNodesDistnacesKernel<<<thread_blocks, threads_per_block>>>(*this, targetImage.depth, targetImage.sensor, cuPose); cudaDeviceSynchronize(); // std::cout << "<<< sortActiveNodes >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", rows: " << targetImage.sensor.rows << // ", cols: " << targetImage.sensor.cols << // std::endl; sortActiveNodesKernel<<<thread_blocks, threads_per_block>>>(*this, targetImage.sensor.rows * targetImage.sensor.cols); 
cudaDeviceSynchronize(); // std::cout << "<<< updateActiveNodesWeights >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", rows: " << targetImage.sensor.rows << // ", cols: " << targetImage.sensor.cols << // std::endl; updateActiveNodesWeightsKernel<<<thread_blocks, threads_per_block>>>(*this, targetImage.sensor.rows * targetImage.sensor.cols); cudaDeviceSynchronize(); }else{ // build KDtree for input mesh "*defGraphMesh" -> is done init Function // int threads_per_block = 512; // int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKDTreeKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; // updateNodeWeightsKDTreeKernel <<< thread_blocks , threads_per_block >>>(*this, *graphKDTree); kdTree *kdtree_h = new kdTree; cudaMemcpy(kdtree_h, graphKDTree, sizeof(kdTree), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); updateActiveNodesWeightsKDTree(*kdtree_h, targetImage, sensor, cuPose); // int size = sensor.rows * sensor.cols; // for(int cnt = 0; cnt < size; cnt ++){ // for(int w =0; w < nNum; w++){ // int nidx = cnt * nNum + w; // printf("id: %d, jw:%d, %d, %f, %f \n", cnt, w , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); // } // } } } /*******************In graph Connections**************************/ __global__ void updateNodeWeightsKernel(defGraph& graph){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // In case of using radius reach for the neighborhood, this parameter will show number of close nodes for(int w = 0; w < graph.nNum; w++){ float ref = dgw; // float ref = dgw * graph.nodes[idx].nDistances[0]; if(graph.nodes[idx].nDistances[w] == 0.0f)continue; // supposed distance[0] contains leasts distance after sorting if(expWeight){ graph.nodes[idx].nWeights[w] = exp(-pow(graph.nodes[idx].nDistances[w],2) / pow(ref,2)); }else{ graph.nodes[idx].nWeights[w] = graph.nodes[idx].nDistances[0] * dgw / graph.nodes[idx].nDistances[w]; } } // if(idx == 10) // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("dist: %f, ids: %d, W: %f\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt],graph.nodes[idx].nWeights[cnt]); // } } } __global__ void updateNodeDistnacesKernel(defGraph& graph){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ // invoking target node vertex position from degGraph geometry::Vertex vi = graph.nodes[idx].vertex; int nIdx = 0; for(int n = 0; n < graph.nodeNum; n++){ // shouldn't add node itself as a neighbour in neighbour list if(n == idx) continue; // invoking neighbour node j vertex position from degGraph geometry::Vertex vj = graph.nodes[n].vertex; // computing distance between target node vi and i-th neighbour vertex position float tmp_dist = distance(vi.position, vj.position); // excluding absolute 0.0 to avoid nan and inf products if(tmp_dist < 10e-5) tmp_dist = 10e-5; // storing distance and id of the neighbour in target node struct graph.nodes[idx].nDistances[nIdx] = tmp_dist; graph.nodes[idx].nIds[nIdx] = n; nIdx++; } } } __global__ void sortNeighbourNodesKernel(defGraph& graph){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; for (int idx = index; idx < size; idx += stride){ 
// Go through all neighbour points for (int i = 1; i < graph.nodeNum -1; i++) { // Store current distance and associated index float currDist = graph.nodes[idx].nDistances[i]; int currIndex = graph.nodes[idx].nIds[i]; // Shift values (and indexes) higher that the current distance to the right int j = i; float tmp_dist = 0; int tmp_index = 0; while (j > 0 && graph.nodes[idx].nDistances[j-1] > currDist) { tmp_dist = graph.nodes[idx].nDistances[j-1]; tmp_index = graph.nodes[idx].nIds[j-1]; graph.nodes[idx].nDistances[j-1] = currDist; graph.nodes[idx].nIds[j-1] = currIndex; graph.nodes[idx].nDistances[j] = tmp_dist; graph.nodes[idx].nIds[j] = tmp_index; --j; } } // if(idx == 10) // for(int cnt=0; cnt< graph.nodeNum; cnt++){ // printf("dist: %f, ids: %d\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt]); // } } } void defGraph::defGraphUpdateNodes(void){ if(!KDTREE){ // build KDtree for input mesh "*defGraphMesh" // find KNN for each vertex in mesh // update Euclidian distnaces between vertices and nodes int threads_per_block = 1024; int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeDistnacesKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // ", nodeNum: " << nodeNum << // std::endl; updateNodeDistnacesKernel<<<thread_blocks, threads_per_block>>>(*this); cudaDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nIds[cnt]); // } // Sort vertices based on their distances threads_per_block = 512; thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< sortNeighbourNodesKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; sortNeighbourNodesKernel<<<thread_blocks, threads_per_block>>>(*this); cudaDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nIds[cnt]); // } threads_per_block = 512; thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; updateNodeWeightsKernel<<<thread_blocks, threads_per_block>>>(*this); cudaDeviceSynchronize(); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, dist: %f, weight: %f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nWeights[cnt], nodes[0].nIds[cnt]); // } }else{ // Update GraphStructure with KDtree - beforehand call GraphKDtreeInit ... 
// build KDtree for input mesh "*defGraphMesh" -> is done init Function // int threads_per_block = 512; // int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodeWeightsKDTreeKernel >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; // updateNodeWeightsKDTreeKernel <<< thread_blocks , threads_per_block >>>(*this, *graphKDTree); kdTree *kdtree_h = new kdTree; cudaMemcpy(kdtree_h, graphKDTree, sizeof(kdTree), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); updateNodeWeightsKDTree(*kdtree_h); // for(int cnt=0; cnt< KNN; cnt++){ // printf("cnt: %d, Dist: %f, weight:%f, ID: %d \n", cnt, nodes[0].nDistances[cnt], nodes[0].nWeights[cnt], nodes[0].nIds[cnt]); // } } } /*****************************************************************/ void defGraph::updateNodeWeightsKDTree(kdTree &kdtree){ defGraphNode *tmpnodes = new defGraphNode[NODE_NUM]; // copy Query KDtree to Device memory cudaMemcpy(tmpnodes, this->nodes, sizeof(defGraphNode) * NODE_NUM, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (int idx = 0; idx < nodeNum; idx ++){ // graph node to KDtree node type struct kdNode currNode = {idx, {tmpnodes[idx].vertex.position.x, tmpnodes[idx].vertex.position.y, tmpnodes[idx].vertex.position.z}}; // finding closest neighbors in KDtree structure kdtree.findKNN(currNode); if(kdtree.VisitedNodes[0].distance < 1e-5)kdtree.VisitedNodes[0].distance = 1e-5; for(int w = 0; w < nNum; w++){ float ref = dgw; // float ref = NODE_NUM/dgw * kdtree.VisitedNodes[0].distance; // supposed distance[0] contains leasts distance after sorting if(kdtree.VisitedNodes[w].distance == 0.0f)continue; if(expWeight){ nodes[idx].nWeights[w] = exp(-pow(kdtree.VisitedNodes[w].distance, 2) / pow(ref,2)); }else{ nodes[idx].nWeights[w] = kdtree.VisitedNodes[0].distance * dgw / kdtree.VisitedNodes[w].distance; } nodes[idx].nDistances[w] = kdtree.VisitedNodes[w].distance; nodes[idx].nIds[w] = kdtree.VisitedNodes[w].id; } } } void defGraph::updateActiveNodesWeightsKDTree(kdTree &kdtree, MeshSTD &targetMesh, float4x4 cuPose){ for (int idx = 0; idx < targetMesh.verticesNum; idx ++){ // graph node to KDtree node type struct kdNode currNode = {idx, {targetMesh.vertices[idx].position.x, targetMesh.vertices[idx].position.y, targetMesh.vertices[idx].position.z}}; // finding closest neighbors in KDtree structure kdtree.findKNN(currNode); for(int w = 0; w < nNum; w++){ int nidx = idx * nNum + w; float ref = dgw; // float ref = dgw * kdtree.VisitedNodes[0].distance; if(expWeight){ visibleNWeights[nidx] = exp(-pow(kdtree.VisitedNodes[w].distance,2) / pow(ref,2)); }else{ visibleNWeights[nidx] = kdtree.VisitedNodes[0].distance * dgw / kdtree.VisitedNodes[w].distance; } visibleNDistances[nidx] = kdtree.VisitedNodes[w].distance; visibleNodeIds[nidx] = kdtree.VisitedNodes[w].id; // printf("id: %d, w:%d, %d, %f, %f \n", idx, w , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); } } } void defGraph::updateActiveNodesWeightsKDTree(kdTree &kdtree, pyramid &targetImage, rgbdSensor sensor, float4x4 cuPose){ int size = sensor.rows * sensor.cols; for (int idx = 0; idx < size; idx ++){ // graph node to KDtree node type // todo... check the range again !!!! 
if(targetImage.depth[idx] < DEPTH_MAX && targetImage.depth[idx] > DEPTH_MIN){ geometry::PointXYZ vi = getPoint3d(idx, targetImage.depth[idx], sensor); struct kdNode currNode = {idx, {vi.x, vi.y, vi.z}}; // finding closest neighbors in KDtree structure kdtree.findKNN(currNode); for(int w = 0; w < nNum; w++){ int nidx = idx * nNum + w; float ref = dgw; // float ref = dgw * kdtree.VisitedNodes[0].distance; if(expWeight){ visibleNWeights[nidx] = exp(-pow(kdtree.VisitedNodes[w].distance,2) / pow(ref,2)); }else{ visibleNWeights[nidx] = kdtree.VisitedNodes[0].distance * dgw / kdtree.VisitedNodes[w].distance; } visibleNDistances[nidx] = kdtree.VisitedNodes[w].distance; visibleNodeIds[nidx] = kdtree.VisitedNodes[w].id; // printf("id: %d, w:%d, %d, %f, %f \n", idx, w , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); } } } } /*****************************************************************/ void defGraph::randomNum(int randNums[], int elements, int range){ for (int i = 0; i < elements; i++){ bool same; do{ same = false; randNums[i] = rand() % range; // Check if the newly generated number is a duplicate: for (int check = 0; check < i; check++){ if (randNums[i] == randNums[check]){ same = true; break; } } } while (same); } } void defGraph::sampleDownMeshDeice(void){ float *randDevData,*hostData; int n = NODE_NUM; cudaMalloc((void **)&randDevData, n * sizeof(float)); hostData = (float *)calloc(n, sizeof(float)); /* initialize random seed: */ curandGenerator_t gen; /* Create pseudo-random number generator */ curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); /* Set seed */ curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); /* Generate n floats on device */ curandGenerateUniform(gen, randDevData, n); /* Copy device memory to host */ cudaMemcpy(hostData, randDevData, n * sizeof(float),cudaMemcpyDeviceToHost); // update node ids querry (subsampling origianl mesh to the max number of nodes "NODE_NUM") // take fisrt n nodes as deformation graph nodes for(int cnt=0; cnt < NODE_NUM; cnt++){ int num = static_cast<int>(hostData[cnt] * this->defGraphMesh.verticesNum); this->nodes[cnt].id = cnt; this->nodes[cnt].vertex = this->defGraphMesh.vertices[num]; this->nodes[cnt].vertexID = num; // printf("%d: %d \n",cnt, num); } curandDestroyGenerator(gen); } void defGraph::sampleDownMeshHost(int obsNum, int mode){ // update node ids querry (subsampling origianl mesh to the max number of nodes "NODE_NUM") // take fisrt n nodes as deformation graph nodes // image or random nodes assignment std::cout << "activeNodesNum: "<< activeNodesNum << ", mode: " << mode << std::endl; if(mode == 0){ //declare array int *nodeIds_rand; //random number generator srand(static_cast<int>(time(0))); nodeIds_rand = new int[activeNodesNum]; randomNum(nodeIds_rand, activeNodesNum, obsNum); for(int cnt = 0; cnt < activeNodesNum; cnt++){ this->nodes[cnt].id = cnt; this->nodes[cnt].vertex = this->defGraphMesh.vertices[nodeIds_rand[cnt]]; this->nodes[cnt].vertexID = nodeIds_rand[cnt]; // std::cout << this->nodes[cnt].id << ", " << this->nodes[cnt].vertexID << std::endl; } }else{ // identical nodes as mesh vertices in order of mesh for(int cnt=0; cnt < activeNodesNum; cnt++){ this->nodes[cnt].id = cnt; this->nodes[cnt].vertex = this->defGraphMesh.vertices[cnt]; this->nodes[cnt].vertexID = cnt; // std::cout << this->nodes[cnt].id << ", " << this->nodes[cnt].vertexID << std::endl; } } } /*****************************************************************/ __global__ void updateNodesDQKernel(defGraph& graph, float* dqVector){ 
int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int size = graph.nodeNum; math::EulerAngles EA; for (int idx = index; idx < size; idx += stride){ EA.roll = dqVector[idx * 6 + 0]; EA.pitch = dqVector[idx * 6 + 1]; EA.yaw = dqVector[idx * 6 + 2]; graph.nodes[idx].dq = math::dualQuat(math::Quaternion(EA), make_float3(dqVector[idx * 6 + 3], dqVector[idx * 6 + 4], dqVector[idx * 6 + 5])); // printf("%f, %f, %f, %f, %f, %f \n", EA.roll, EA.pitch, EA.yaw ,dqVector[idx * 6 + 3], dqVector[idx * 6 + 4],dqVector[idx * 6 + 5]); } } void defGraph::updateNodesDQ(float* dqVector){ // update Euclidian distnaces between nodes int threads_per_block = 512; int thread_blocks =(this->nodeNum + threads_per_block - 1) / threads_per_block; // std::cout << "<<< updateNodesDQ >>> threadBlocks: "<< thread_blocks << // ", threadPerBlock: " << threads_per_block << // std::endl; updateNodesDQKernel<<<thread_blocks, threads_per_block>>>(*this, dqVector); cudaDeviceSynchronize(); } void defGraph::writeDefGraphToFile(const char* fileName, int obsNum, int mode){ defGraphNode *h_nodes = new defGraphNode[NODE_NUM]; cudaMemcpy(h_nodes, this->nodes, sizeof(defGraphNode) * NODE_NUM, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); std::ofstream file(fileName,std::ios::ate); if (file.is_open()){ if(mode == 0){ file << fileName <<", NODE_NUM: " << NODE_NUM << std::endl; for (size_t i = 0; i < nodeNum; i++){ file << "[ " << std::endl; file << "id: " << h_nodes[i].id << std::endl; file << "nNum: " << nNum << std::endl; file << "nodeIds: " ; for(int j = 0; j < nNum; j++){ file << h_nodes[i].nIds[j] << " "; } file << std::endl; file << "VertexPose: " << h_nodes[i].vertex.position.x << " " << h_nodes[i].vertex.position.y << " " << h_nodes[i].vertex.position.z << " " << std::endl; file << "VertexNormal: " << h_nodes[i].vertex.normal.x << " " << h_nodes[i].vertex.normal.y << " " << h_nodes[i].vertex.normal.z << " " << std::endl; file << "VertexColor: " << h_nodes[i].vertex.color.x << " " << h_nodes[i].vertex.color.y << " " << h_nodes[i].vertex.color.z << " " << std::endl; file << "dq: " << h_nodes[i].dq << std::endl; file << "nDistances: " ; for(int j = 0; j < nNum; j++){ file << h_nodes[i].nDistances[j] << " "; } file << std::endl; file << "nWeights: " ; for(int j = 0; j < nNum; j++){ file << h_nodes[i].nWeights[j] << " "; } file << std::endl; file << "]," << std::endl; } }else if(mode == 1){ // for(int cnt = 0; cnt < obsNum; cnt ++){ // for(int j =0; j<nNum; j++){ // int nidx = cnt * nNum + j; // printf("xcid: %d, j:%d, %d, %f, %f \n", cnt, j , visibleNodeIds[nidx], visibleNDistances[nidx], visibleNWeights[nidx]); // } // } file << fileName <<", ACTIVE_NODE_NUM: " << visibleNodesNum << ", ObsNum: " << obsNum<< std::endl; int i = 0; while(true){ int index = i * KNN; file << "[ " << std::endl; file << "pixelID/vertexID: " << i << std::endl; file << "visibleNodeIds: " ; for(int j = 0; j < nNum; j++){ file << this->visibleNodeIds[index + j] << " "; } file << std::endl; file << "visibleNDistances: " ; for(int j = 0; j < nNum; j++){ file << this->visibleNDistances[index + j] << " "; } file << std::endl; file << "visibleNWeights: " ; for(int j = 0; j < nNum; j++){ file << this->visibleNWeights[index + j] << " "; } file << std::endl; file << "]," << std::endl; i++; if(i >= obsNum || i >= 500)break; } } file.close(); } else std::cout << "Unable to open file"; delete[] h_nodes; } } // namespace geometry } // namespace DynaMap
6b36ed83dfccee3beb6d9258035d653c625d053b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gm/types/vec3f.h>
#include <tri/tri.h>

TRI_NS_OPEN

__global__ void ConvertRGBFloatToRGBAUint32_Kernel( size_t i_numPixels, const gm::Vec3f* i_image, uint32_t* o_image )
{
    int pixelIndex = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    if ( pixelIndex >= i_numPixels )
    {
        return;
    }

    const gm::Vec3f& inPixel = i_image[ pixelIndex ];
    uint8_t* outPixel = reinterpret_cast< uint8_t* >( &o_image[ pixelIndex ] );
    outPixel[ 0 ] = static_cast< uint8_t >( 255.999 * inPixel[ 0 ] );
    outPixel[ 1 ] = static_cast< uint8_t >( 255.999 * inPixel[ 1 ] );
    outPixel[ 2 ] = static_cast< uint8_t >( 255.999 * inPixel[ 2 ] );
}

TRI_NS_CLOSE
6b36ed83dfccee3beb6d9258035d653c625d053b.cu
#include <gm/types/vec3f.h>
#include <tri/tri.h>

TRI_NS_OPEN

__global__ void ConvertRGBFloatToRGBAUint32_Kernel( size_t i_numPixels, const gm::Vec3f* i_image, uint32_t* o_image )
{
    int pixelIndex = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    if ( pixelIndex >= i_numPixels )
    {
        return;
    }

    const gm::Vec3f& inPixel = i_image[ pixelIndex ];
    uint8_t* outPixel = reinterpret_cast< uint8_t* >( &o_image[ pixelIndex ] );
    outPixel[ 0 ] = static_cast< uint8_t >( 255.999 * inPixel[ 0 ] );
    outPixel[ 1 ] = static_cast< uint8_t >( 255.999 * inPixel[ 1 ] );
    outPixel[ 2 ] = static_cast< uint8_t >( 255.999 * inPixel[ 2 ] );
}

TRI_NS_CLOSE
8d875df9cb3e5cf3bcd02610bacb277ad5547ccd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "cudaNormalize.h"
#include "cudaVector.h"

// gpuNormalize
template <typename T>
__global__ void gpuNormalize( T* input, T* output, int width, int height, float scaling_factor, float max_input )
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if( x >= width || y >= height )
        return;

    const T px = input[ y * width + x ];

    output[y*width+x] = make_vec<T>(px.x * scaling_factor, px.y * scaling_factor, px.z * scaling_factor, alpha(px, max_input) * scaling_factor);
}

template<typename T>
hipError_t launchNormalizeRGB( T* input, const float2& input_range, T* output, const float2& output_range, size_t width, size_t height )
{
    if( !input || !output )
        return hipErrorInvalidDevicePointer;

    if( width == 0 || height == 0 )
        return hipErrorInvalidValue;

    const float multiplier = output_range.y / input_range.y;

    // launch kernel
    const dim3 blockDim(32,8);
    const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));

    hipLaunchKernelGGL(( gpuNormalize<T>), dim3(gridDim), dim3(blockDim), 0, 0, input, output, width, height, multiplier, input_range.y);

    return CUDA(hipGetLastError());
}

// cudaNormalizeRGB
hipError_t cudaNormalizeRGB( float3* input, const float2& input_range, float3* output, const float2& output_range, size_t width, size_t height )
{
    return launchNormalizeRGB<float3>(input, input_range, output, output_range, width, height);
}

// cudaNormalizeRGBA
hipError_t cudaNormalizeRGBA( float4* input, const float2& input_range, float4* output, const float2& output_range, size_t width, size_t height )
{
    return launchNormalizeRGB<float4>(input, input_range, output, output_range, width, height);
}
8d875df9cb3e5cf3bcd02610bacb277ad5547ccd.cu
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "cudaNormalize.h"
#include "cudaVector.h"

// gpuNormalize
template <typename T>
__global__ void gpuNormalize( T* input, T* output, int width, int height, float scaling_factor, float max_input )
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if( x >= width || y >= height )
        return;

    const T px = input[ y * width + x ];

    output[y*width+x] = make_vec<T>(px.x * scaling_factor, px.y * scaling_factor, px.z * scaling_factor, alpha(px, max_input) * scaling_factor);
}

template<typename T>
cudaError_t launchNormalizeRGB( T* input, const float2& input_range, T* output, const float2& output_range, size_t width, size_t height )
{
    if( !input || !output )
        return cudaErrorInvalidDevicePointer;

    if( width == 0 || height == 0 )
        return cudaErrorInvalidValue;

    const float multiplier = output_range.y / input_range.y;

    // launch kernel
    const dim3 blockDim(32,8);
    const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y));

    gpuNormalize<T><<<gridDim, blockDim>>>(input, output, width, height, multiplier, input_range.y);

    return CUDA(cudaGetLastError());
}

// cudaNormalizeRGB
cudaError_t cudaNormalizeRGB( float3* input, const float2& input_range, float3* output, const float2& output_range, size_t width, size_t height )
{
    return launchNormalizeRGB<float3>(input, input_range, output, output_range, width, height);
}

// cudaNormalizeRGBA
cudaError_t cudaNormalizeRGBA( float4* input, const float2& input_range, float4* output, const float2& output_range, size_t width, size_t height )
{
    return launchNormalizeRGB<float4>(input, input_range, output, output_range, width, height);
}
e9ec1ac3712382a9664c02467133f953f9582ef2.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> #include <sys/time.h> #include <hip/hip_cooperative_groups.h> #include <hip/hip_runtime_api.h> using namespace cooperative_groups; /////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca" //////////////large vs small data. /////test if l1/l2 hit can still cause page faults? //////nvprof --profile-from-start off --print-gpu-trace --log-file test1.txt --csv ./fault_group_test15 //////result: L2(different SM) won't catch it (multiple faults). void init_cpu_data(long long int* A, long long int size, double stride){ for (long long int i = 0; i < size; i++){ A[i]=1; } /* for (long long int i = 0; i < size - stride; i++){ A[i]=(i + stride); } for (long long int i = size - stride; i < size; i++){ A[i]=0; } */ } __global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){ long long int index = (blockIdx.x * blockDim.x + threadIdx.x); long long int thread_num = gridDim.x * blockDim.x; for(long long int it = 0; it < data_size; it = it + thread_num){ A[index + it]=23; } } long long unsigned time_diff(timespec start, timespec end){ struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0){ temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else{ temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } long long unsigned time_interval_ns = temp.tv_nsec; long long unsigned time_interval_s = temp.tv_sec; time_interval_s = time_interval_s * 1000000000; return time_interval_s + time_interval_ns; } #define stride 1 /* ///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m), __global__ void page_visitor4(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////1 thread 1 data / 1 warp 1 data long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; //if(threadIdx.x <= clock_count){ value1 = A1[index]; B1[index] = value1; //} } __global__ void page_visitor5(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 thread all data int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; for(long long int i = 0; i <= clock_count; i++){ if(threadIdx.x == 0){ value1 = A1[index]; B1[index] = value1; } index+=stride; } } __global__ void page_visitor6(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 warp all data int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; for(long long int i = 0; i <= clock_count; i++){ value1 = A1[index]; B1[index] = value1; index += 32 *stride; } } 
*/ __global__ void page_visitor7(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 warp 1 thread data int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; value1 = A1[0]; B1[index] = value1; } ///////////long 0 - 31 same core ///////////long 0 - 64 same core ///////////long 0 - 64 different core ///////////mixed 0 - 64 same core ///////////mixed 0 - 64 different core int main(int argc, char **argv) { printf("\n"); // set device hipDeviceProp_t device_prop; //long long int dev_id = findCudaDevice(argc, (const char **) argv); long long int dev_id = 0; checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id)); //int peak_clk = 1;//kHz //checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id)); //float clock_rate = (float) peak_clk; //printf("clock_rate:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == hipComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } /* if (device_prop.concurrentManagedAccess == 1){ printf("This device supports concurrent Managed Access.\n"); }else{ printf("This device does not support concurrent Managed Access.\n"); } */ int value1 = 1; checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id)); //printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1); //long long int num_thread = 256; //long long int size_of_data = 524288; for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. 
(index) //printf("\n"); for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMalloc(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } //gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max //hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } hipProfilerStart();////////////////////////////////start /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; hipLaunchKernelGGL(( page_visitor7), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data same core hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); hipProfilerStop();/////////////////////////////////stop } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. 
(index) //printf("\n"); for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMalloc(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } //gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max //hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } hipProfilerStart();////////////////////////////////start /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; hipLaunchKernelGGL(( page_visitor7), dim3(block_num), dim3(1024), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data dif cores hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); hipProfilerStop();/////////////////////////////////stop } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. 
(index) //printf("\n"); for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMalloc(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } //gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max //hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } hipProfilerStart();////////////////////////////////start /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); long long int block_num = 64; hipLaunchKernelGGL(( page_visitor7), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp all data hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); hipProfilerStop();/////////////////////////////////stop } } } } } } } printf("\n"); exit(EXIT_SUCCESS); }
e9ec1ac3712382a9664c02467133f953f9582ef2.cu
#include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> #include <sys/time.h> #include <cooperative_groups.h> #include <cuda_profiler_api.h> using namespace cooperative_groups; /////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca" //////////////large vs small data. /////test if l1/l2 hit can still cause page faults? //////nvprof --profile-from-start off --print-gpu-trace --log-file test1.txt --csv ./fault_group_test15 //////result: L2(different SM) won't catch it (multiple faults). void init_cpu_data(long long int* A, long long int size, double stride){ for (long long int i = 0; i < size; i++){ A[i]=1; } /* for (long long int i = 0; i < size - stride; i++){ A[i]=(i + stride); } for (long long int i = size - stride; i < size; i++){ A[i]=0; } */ } __global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){ long long int index = (blockIdx.x * blockDim.x + threadIdx.x); long long int thread_num = gridDim.x * blockDim.x; for(long long int it = 0; it < data_size; it = it + thread_num){ A[index + it]=23; } } long long unsigned time_diff(timespec start, timespec end){ struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0){ temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else{ temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } long long unsigned time_interval_ns = temp.tv_nsec; long long unsigned time_interval_s = temp.tv_sec; time_interval_s = time_interval_s * 1000000000; return time_interval_s + time_interval_ns; } #define stride 1 /* ///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m), __global__ void page_visitor4(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////1 thread 1 data / 1 warp 1 data long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; //if(threadIdx.x <= clock_count){ value1 = A1[index]; B1[index] = value1; //} } __global__ void page_visitor5(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 thread all data int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; for(long long int i = 0; i <= clock_count; i++){ if(threadIdx.x == 0){ value1 = A1[index]; B1[index] = value1; } index+=stride; } } __global__ void page_visitor6(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///1 warp all data int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; for(long long int i = 0; i <= clock_count; i++){ value1 = A1[index]; B1[index] = value1; index += 32 *stride; } } */ __global__ void page_visitor7(long long int *A1, long long int 
*B1, double data_stride, long long int clock_count){///1 warp 1 thread data int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; //double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; double temp = (threadIdx.x + blockIdx.x * blockDim.x) * stride; long long int index = __double2ll_rd(temp); long long int value1; value1 = A1[0]; B1[index] = value1; } ///////////long 0 - 31 same core ///////////long 0 - 64 same core ///////////long 0 - 64 different core ///////////mixed 0 - 64 same core ///////////mixed 0 - 64 different core int main(int argc, char **argv) { printf("\n"); // set device cudaDeviceProp device_prop; //long long int dev_id = findCudaDevice(argc, (const char **) argv); long long int dev_id = 0; checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id)); //int peak_clk = 1;//kHz //checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id)); //float clock_rate = (float) peak_clk; //printf("clock_rate:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == cudaComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } /* if (device_prop.concurrentManagedAccess == 1){ printf("This device supports concurrent Managed Access.\n"); }else{ printf("This device does not support concurrent Managed Access.\n"); } */ int value1 = 1; checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id)); //printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1); //long long int num_thread = 256; //long long int size_of_data = 524288; for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. 
(index) //printf("\n"); for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMalloc(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } //gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max //cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } cudaProfilerStart();////////////////////////////////start /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; page_visitor7<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data same core cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); cudaProfilerStop();/////////////////////////////////stop } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. 
(index) //printf("\n"); for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMalloc(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } //gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max //cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } cudaProfilerStart();////////////////////////////////start /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; page_visitor7<<<block_num, 1024>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp 1 data dif cores cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); cudaProfilerStop();/////////////////////////////////stop } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. 
(index) //printf("\n"); for(long long int clock_count = 31; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMalloc(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } //gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max //cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } cudaProfilerStart();////////////////////////////////start /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); long long int block_num = 64; page_visitor7<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);///////1 warp all data cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); cudaProfilerStop();/////////////////////////////////stop } } } } } } } printf("\n"); exit(EXIT_SUCCESS); }
978ba75c782c6dd161f7224990b0988a3db6d738.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/tile_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"

namespace phi {

template <typename T, typename Context>
void TileKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const IntArray& repeat_times,
                DenseTensor* out) {
  auto x_dims = x.dims();
  auto rank = x_dims.size();
  auto repeat_times_data = repeat_times.GetData();
  int repeat_times_size = repeat_times_data.size();
  rank = ::max(rank, repeat_times_size);

  if (rank == 0) {
    phi::Copy<DeviceContext>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
    return;
  }

  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    PADDLE_ENFORCE_GT(
        repeat_times_data[i],
        0,
        errors::InvalidArgument(
            "All elements of the input 'repeat_times' for tile op must "
            "be positive integers, but the value received is %d.",
            repeat_times_data[i]));
  }

  auto vec_x_dims = phi::vectorize<int>(x_dims);
  if (repeat_times_data.size() < vec_x_dims.size()) {
    int diff = vec_x_dims.size() - repeat_times_data.size();
    repeat_times_data.insert(repeat_times_data.begin(), diff, 1);
  } else {
    int diff = repeat_times_data.size() - vec_x_dims.size();
    vec_x_dims.insert(vec_x_dims.begin(), diff, 1);
  }

  PADDLE_ENFORCE_EQ(
      repeat_times_data.size(),
      vec_x_dims.size(),
      errors::InvalidArgument(
          "The rank (%d) of the input 'x' and the rank (%d) of the input "
          "'repeat_times' for tile op must match after promotion.",
          vec_x_dims.size(),
          repeat_times_data.size()));

  DDim new_x_dims = make_ddim(vec_x_dims);
  DDim out_dims(new_x_dims);
  DenseTensor new_x = x;
  vec_x_dims.insert(vec_x_dims.begin(), 1, 1);
  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    out_dims[i] *= repeat_times_data[i];
    new_x.Resize(make_ddim(vec_x_dims));
    std::vector<const DenseTensor*> ins = {&new_x};
    vec_x_dims[i] *= repeat_times_data[i];
    if (i != repeat_times_data.size() - 1) {
      if (repeat_times_data[i] != 1) {
        DenseTensor tmp_out;
        tmp_out.Resize(make_ddim(vec_x_dims));
        dev_ctx.template Alloc<T>(&tmp_out);
        std::vector<DenseTensor*> outs = {&tmp_out};
        phi::funcs::BroadcastKernel<ElementwiseType::kUnary, T, T>(
            dev_ctx, ins, &outs, i, kps::IdentityFunctor<T>());
        tmp_out.Resize(out_dims);
        new_x = tmp_out;
      }
      vec_x_dims[i] *= vec_x_dims[i + 1];
      vec_x_dims[i + 1] = 1;
    } else {
      out->Resize(make_ddim(vec_x_dims));
      dev_ctx.template Alloc<T>(out);
      std::vector<DenseTensor*> outs = {out};
      phi::funcs::BroadcastKernel<ElementwiseType::kUnary, T, T>(
          dev_ctx, ins, &outs, i, kps::IdentityFunctor<T>());
      out->Resize(out_dims);
    }
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(tile,
                   GPU,
                   ALL_LAYOUT,
                   phi::TileKernel,
                   bool,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>) {}
978ba75c782c6dd161f7224990b0988a3db6d738.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/tile_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"

namespace phi {

template <typename T, typename Context>
void TileKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const IntArray& repeat_times,
                DenseTensor* out) {
  auto x_dims = x.dims();
  auto rank = x_dims.size();
  auto repeat_times_data = repeat_times.GetData();
  int repeat_times_size = repeat_times_data.size();
  rank = std::max(rank, repeat_times_size);

  if (rank == 0) {
    phi::Copy<DeviceContext>(dev_ctx, x, dev_ctx.GetPlace(), false, out);
    return;
  }

  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    PADDLE_ENFORCE_GT(
        repeat_times_data[i],
        0,
        errors::InvalidArgument(
            "All elements of the input 'repeat_times' for tile op must "
            "be positive integers, but the value received is %d.",
            repeat_times_data[i]));
  }

  auto vec_x_dims = phi::vectorize<int>(x_dims);
  if (repeat_times_data.size() < vec_x_dims.size()) {
    int diff = vec_x_dims.size() - repeat_times_data.size();
    repeat_times_data.insert(repeat_times_data.begin(), diff, 1);
  } else {
    int diff = repeat_times_data.size() - vec_x_dims.size();
    vec_x_dims.insert(vec_x_dims.begin(), diff, 1);
  }

  PADDLE_ENFORCE_EQ(
      repeat_times_data.size(),
      vec_x_dims.size(),
      errors::InvalidArgument(
          "The rank (%d) of the input 'x' and the rank (%d) of the input "
          "'repeat_times' for tile op must match after promotion.",
          vec_x_dims.size(),
          repeat_times_data.size()));

  DDim new_x_dims = make_ddim(vec_x_dims);
  DDim out_dims(new_x_dims);
  DenseTensor new_x = x;
  vec_x_dims.insert(vec_x_dims.begin(), 1, 1);
  for (size_t i = 0; i < repeat_times_data.size(); ++i) {
    out_dims[i] *= repeat_times_data[i];
    new_x.Resize(make_ddim(vec_x_dims));
    std::vector<const DenseTensor*> ins = {&new_x};
    vec_x_dims[i] *= repeat_times_data[i];
    if (i != repeat_times_data.size() - 1) {
      if (repeat_times_data[i] != 1) {
        DenseTensor tmp_out;
        tmp_out.Resize(make_ddim(vec_x_dims));
        dev_ctx.template Alloc<T>(&tmp_out);
        std::vector<DenseTensor*> outs = {&tmp_out};
        phi::funcs::BroadcastKernel<ElementwiseType::kUnary, T, T>(
            dev_ctx, ins, &outs, i, kps::IdentityFunctor<T>());
        tmp_out.Resize(out_dims);
        new_x = tmp_out;
      }
      vec_x_dims[i] *= vec_x_dims[i + 1];
      vec_x_dims[i + 1] = 1;
    } else {
      out->Resize(make_ddim(vec_x_dims));
      dev_ctx.template Alloc<T>(out);
      std::vector<DenseTensor*> outs = {out};
      phi::funcs::BroadcastKernel<ElementwiseType::kUnary, T, T>(
          dev_ctx, ins, &outs, i, kps::IdentityFunctor<T>());
      out->Resize(out_dims);
    }
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(tile,
                   GPU,
                   ALL_LAYOUT,
                   phi::TileKernel,
                   bool,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>) {}
5a73d85f21fef49ce9a1831465135cd530366864.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
    float tmp_1 = +0.0f * var_2 * (var_3 - (+1.7217E35f * -0.0f));
    float tmp_2 = (var_4 + var_5);
    comp = tmp_2 + tmp_1 * (var_6 + var_7);
    comp += acosf(+1.4852E-36f);
    for (int i=0; i < var_1; ++i) {
        float tmp_3 = -1.9678E35f;
        comp += tmp_3 + +1.7015E34f - var_8 / (+1.9013E-25f + var_9 * +1.6933E36f);
        comp = var_10 / +1.0225E-41f * -1.9365E36f;
        comp = var_11 + (var_12 + -1.8486E-41f);
    }
    printf("%.17g\n", comp);
}

float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}

int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);

    hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
    hipDeviceSynchronize();

    return 0;
}
5a73d85f21fef49ce9a1831465135cd530366864.cu
/* This is a automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
    float tmp_1 = +0.0f * var_2 * (var_3 - (+1.7217E35f * -0.0f));
    float tmp_2 = (var_4 + var_5);
    comp = tmp_2 + tmp_1 * (var_6 + var_7);
    comp += acosf(+1.4852E-36f);
    for (int i=0; i < var_1; ++i) {
        float tmp_3 = -1.9678E35f;
        comp += tmp_3 + +1.7015E34f - var_8 / (+1.9013E-25f + var_9 * +1.6933E36f);
        comp = var_10 / +1.0225E-41f * -1.9365E36f;
        comp = var_11 + (var_12 + -1.8486E-41f);
    }
    printf("%.17g\n", comp);
}

float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}

int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);

    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
    cudaDeviceSynchronize();

    return 0;
}
91103ef311349d7da30eadde1f13ca604f2d85f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/sparse/SparseStubs.h> #include <ATen/native/sparse/SparseBinaryOpIntersectionCommon.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/AccumulateType.h> namespace at::native { namespace { template <typename func_t> struct CUDAKernelLauncher { static void launch(TensorIteratorBase& iter, const func_t& f) { gpu_kernel(iter, f); } }; struct MulOp { template <typename scalar_t> static FUNCAPI INLINE scalar_t apply(scalar_t a, scalar_t b) { return a * b; } }; template <> FUNCAPI INLINE bool MulOp::apply(bool a, bool b) { return a && b; } struct RhsProjOp { template <typename scalar_t> static FUNCAPI scalar_t apply(scalar_t a, scalar_t b) { return b; } }; template <int nt, int vt, typename loop_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void apply_kernel(int n, loop_t loop) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < n) { loop(idx); idx += nt; } } } template <int nt, int vt, typename loop_t> void launch_kernel(int64_t n, const loop_t& loop) { TORCH_INTERNAL_ASSERT(0 <= n && n <= std::numeric_limits<int32_t>::max()); if (!n) { return; } const dim3 block(nt); const dim3 grid((n + block.x * vt - 1) / (block.x * vt)); const auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( apply_kernel<nt, vt, loop_t>), dim3(grid), dim3(block), 0, stream, n, loop); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename binary_op_t, typename scalar_t, typename index_t> void binary_op_intersection_kernel( TensorIterator& iter, int64_t lhs_nnz_stride, int64_t rhs_nnz_stride, const Tensor& argsort) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( sub_iter, lhs_nnz_stride, rhs_nnz_stride, argsort); } return; } auto* RESTRICT ptr_res_values_bytes = reinterpret_cast<char*>(iter.data_ptr(0)); const auto* RESTRICT ptr_lhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(1)); const auto* RESTRICT ptr_lhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(2)); const auto* RESTRICT ptr_rhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(3)); const auto* RESTRICT ptr_rhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(4)); const auto* RESTRICT ptr_intersction_counts_bytes = reinterpret_cast<char*>(iter.data_ptr(5)); const auto* RESTRICT ptr_argsort = argsort.data_ptr<index_t>(); auto offset_calc = make_offset_calculator<6>(iter); auto loop = [=] FUNCAPI (int i) { auto offsets = offset_calc.get(i); auto* RESTRICT ptr_res_values = reinterpret_cast<scalar_t*>(ptr_res_values_bytes + offsets[0]); const auto* RESTRICT ptr_lhs_values = reinterpret_cast<const scalar_t*>(ptr_lhs_values_bytes + offsets[1]); const auto lhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_lhs_select_idx_bytes + offsets[2]); const auto* RESTRICT ptr_rhs_values = reinterpret_cast<const scalar_t*>(ptr_rhs_values_bytes + offsets[3]); const auto rhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_rhs_select_idx_bytes + offsets[4]); const auto count = *reinterpret_cast<const int64_t*>(ptr_intersction_counts_bytes + offsets[5]); const auto* RESTRICT ptr_lhs_begin = ptr_lhs_values + lhs_nnz_idx * lhs_nnz_stride; const auto* RESTRICT ptr_rhs_sorted_nnz_idx = ptr_argsort + rhs_nnz_idx; 
using accscalar_t = at::acc_type<scalar_t, /*is_gpu=*/true>; accscalar_t res_values = 0; accscalar_t lhs_values = static_cast<accscalar_t>(*ptr_lhs_begin); accscalar_t rhs_values; index_t rhs_sorted_nnz_idx; for (int64_t c = 0; c < count; ++c) { rhs_sorted_nnz_idx = *ptr_rhs_sorted_nnz_idx++; rhs_values = static_cast<accscalar_t>(*(ptr_rhs_values + rhs_sorted_nnz_idx * rhs_nnz_stride)); res_values += binary_op_t::apply(lhs_values, rhs_values); } *ptr_res_values = static_cast<scalar_t>(res_values); }; launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } template <typename binary_op_t> struct CUDAValueSelectionIntersectionKernel { static Tensor apply( const Tensor& lhs_values, const Tensor& lhs_select_idx, const Tensor& rhs_values, const Tensor& rhs_select_idx, const Tensor& intersection_counts, const Tensor& argsort) { auto iter = make_value_selection_intersection_iter( lhs_values, lhs_select_idx, rhs_values, rhs_select_idx, intersection_counts); auto res_values = iter.tensor(0); // If res_values is empty, we can return it right away. // Otherwise floating point issues with OffsetCalculator. if (!res_values.numel()) { return res_values; } const auto lhs_nnz_stride = lhs_values.stride(0); const auto rhs_nnz_stride = rhs_values.stride(0); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::Half, ScalarType::BFloat16, res_values.scalar_type(), "binary_op_intersection_cpu", [&] { // COO indices are only 64-bit for now. using index_t = int64_t; binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( iter, lhs_nnz_stride, rhs_nnz_stride, argsort); }); return res_values; } }; void mul_sparse_sparse_out_cuda_kernel( Tensor& result, const Tensor& x, const Tensor& y) { using CUDAValueSelectionMulKernel = CUDAValueSelectionIntersectionKernel<MulOp>; _sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, CUDAValueSelectionMulKernel>( result, x, y ); } void sparse_mask_intersection_out_cuda_kernel( Tensor& result, const Tensor& x, const Tensor& y) { using CUDAValueRhsProjKernel = CUDAValueSelectionIntersectionKernel<RhsProjOp>; _sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, CUDAValueRhsProjKernel>( result, x, y, true ); } } REGISTER_CUDA_DISPATCH(mul_sparse_sparse_out_stub, &mul_sparse_sparse_out_cuda_kernel); REGISTER_CUDA_DISPATCH(sparse_mask_intersection_out_stub, &sparse_mask_intersection_out_cuda_kernel); } // namespace at::native
91103ef311349d7da30eadde1f13ca604f2d85f7.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/sparse/SparseStubs.h> #include <ATen/native/sparse/SparseBinaryOpIntersectionCommon.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/AccumulateType.h> namespace at::native { namespace { template <typename func_t> struct CUDAKernelLauncher { static void launch(TensorIteratorBase& iter, const func_t& f) { gpu_kernel(iter, f); } }; struct MulOp { template <typename scalar_t> static FUNCAPI INLINE scalar_t apply(scalar_t a, scalar_t b) { return a * b; } }; template <> FUNCAPI INLINE bool MulOp::apply(bool a, bool b) { return a && b; } struct RhsProjOp { template <typename scalar_t> static FUNCAPI scalar_t apply(scalar_t a, scalar_t b) { return b; } }; template <int nt, int vt, typename loop_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void apply_kernel(int n, loop_t loop) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < n) { loop(idx); idx += nt; } } } template <int nt, int vt, typename loop_t> void launch_kernel(int64_t n, const loop_t& loop) { TORCH_INTERNAL_ASSERT(0 <= n && n <= std::numeric_limits<int32_t>::max()); if (!n) { return; } const dim3 block(nt); const dim3 grid((n + block.x * vt - 1) / (block.x * vt)); const auto stream = at::cuda::getCurrentCUDAStream(); apply_kernel<nt, vt, loop_t><<<grid, block, 0, stream>>>(n, loop); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename binary_op_t, typename scalar_t, typename index_t> void binary_op_intersection_kernel( TensorIterator& iter, int64_t lhs_nnz_stride, int64_t rhs_nnz_stride, const Tensor& argsort) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( sub_iter, lhs_nnz_stride, rhs_nnz_stride, argsort); } return; } auto* RESTRICT ptr_res_values_bytes = reinterpret_cast<char*>(iter.data_ptr(0)); const auto* RESTRICT ptr_lhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(1)); const auto* RESTRICT ptr_lhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(2)); const auto* RESTRICT ptr_rhs_values_bytes = reinterpret_cast<char*>(iter.data_ptr(3)); const auto* RESTRICT ptr_rhs_select_idx_bytes = reinterpret_cast<char*>(iter.data_ptr(4)); const auto* RESTRICT ptr_intersction_counts_bytes = reinterpret_cast<char*>(iter.data_ptr(5)); const auto* RESTRICT ptr_argsort = argsort.data_ptr<index_t>(); auto offset_calc = make_offset_calculator<6>(iter); auto loop = [=] FUNCAPI (int i) { auto offsets = offset_calc.get(i); auto* RESTRICT ptr_res_values = reinterpret_cast<scalar_t*>(ptr_res_values_bytes + offsets[0]); const auto* RESTRICT ptr_lhs_values = reinterpret_cast<const scalar_t*>(ptr_lhs_values_bytes + offsets[1]); const auto lhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_lhs_select_idx_bytes + offsets[2]); const auto* RESTRICT ptr_rhs_values = reinterpret_cast<const scalar_t*>(ptr_rhs_values_bytes + offsets[3]); const auto rhs_nnz_idx = *reinterpret_cast<const index_t*>(ptr_rhs_select_idx_bytes + offsets[4]); const auto count = *reinterpret_cast<const int64_t*>(ptr_intersction_counts_bytes + offsets[5]); const auto* RESTRICT ptr_lhs_begin = ptr_lhs_values + lhs_nnz_idx * lhs_nnz_stride; const auto* RESTRICT ptr_rhs_sorted_nnz_idx = ptr_argsort + rhs_nnz_idx; using accscalar_t = at::acc_type<scalar_t, /*is_gpu=*/true>; accscalar_t res_values = 0; accscalar_t lhs_values = 
static_cast<accscalar_t>(*ptr_lhs_begin); accscalar_t rhs_values; index_t rhs_sorted_nnz_idx; for (int64_t c = 0; c < count; ++c) { rhs_sorted_nnz_idx = *ptr_rhs_sorted_nnz_idx++; rhs_values = static_cast<accscalar_t>(*(ptr_rhs_values + rhs_sorted_nnz_idx * rhs_nnz_stride)); res_values += binary_op_t::apply(lhs_values, rhs_values); } *ptr_res_values = static_cast<scalar_t>(res_values); }; launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } template <typename binary_op_t> struct CUDAValueSelectionIntersectionKernel { static Tensor apply( const Tensor& lhs_values, const Tensor& lhs_select_idx, const Tensor& rhs_values, const Tensor& rhs_select_idx, const Tensor& intersection_counts, const Tensor& argsort) { auto iter = make_value_selection_intersection_iter( lhs_values, lhs_select_idx, rhs_values, rhs_select_idx, intersection_counts); auto res_values = iter.tensor(0); // If res_values is empty, we can return it right away. // Otherwise floating point issues with OffsetCalculator. if (!res_values.numel()) { return res_values; } const auto lhs_nnz_stride = lhs_values.stride(0); const auto rhs_nnz_stride = rhs_values.stride(0); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::Half, ScalarType::BFloat16, res_values.scalar_type(), "binary_op_intersection_cpu", [&] { // COO indices are only 64-bit for now. using index_t = int64_t; binary_op_intersection_kernel<binary_op_t, scalar_t, index_t>( iter, lhs_nnz_stride, rhs_nnz_stride, argsort); }); return res_values; } }; void mul_sparse_sparse_out_cuda_kernel( Tensor& result, const Tensor& x, const Tensor& y) { using CUDAValueSelectionMulKernel = CUDAValueSelectionIntersectionKernel<MulOp>; _sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, CUDAValueSelectionMulKernel>( result, x, y ); } void sparse_mask_intersection_out_cuda_kernel( Tensor& result, const Tensor& x, const Tensor& y) { using CUDAValueRhsProjKernel = CUDAValueSelectionIntersectionKernel<RhsProjOp>; _sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, CUDAValueRhsProjKernel>( result, x, y, true ); } } REGISTER_CUDA_DISPATCH(mul_sparse_sparse_out_stub, &mul_sparse_sparse_out_cuda_kernel); REGISTER_CUDA_DISPATCH(sparse_mask_intersection_out_stub, &sparse_mask_intersection_out_cuda_kernel); } // namespace at::native
35ccf0ed9abc305ea8e8ebcf1128cf1d60df5109.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>

__global__ void square(float * d_in, float * d_out){
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f;
}

int main(int argc, char ** argv){
    const int ARRAY_SIZE = 3;
    const int ARRAY_BYTES = ARRAY_SIZE*sizeof(float);

    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];

    for(int i=0; i<ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }

    float * d_in;
    float * d_out;

    hipMalloc((void **) &d_in, ARRAY_BYTES);
    hipMalloc((void **) &d_out, ARRAY_BYTES);

    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_in, d_out);

    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    for(int i=0; i<ARRAY_SIZE; i++){
        printf("%f \n", h_out[i]);
    }

    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
35ccf0ed9abc305ea8e8ebcf1128cf1d60df5109.cu
#include<stdio.h>
#include<cuda.h>

__global__ void square(float * d_in, float * d_out){
    int idx = threadIdx.x;
    float f = d_in[idx];
    d_out[idx] = f*f;
}

int main(int argc, char ** argv){
    const int ARRAY_SIZE = 3;
    const int ARRAY_BYTES = ARRAY_SIZE*sizeof(float);

    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];

    for(int i=0; i<ARRAY_SIZE; i++){
        h_in[i] = float(i);
    }

    float * d_in;
    float * d_out;

    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);

    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    square<<<1, ARRAY_SIZE>>>(d_in, d_out);

    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    for(int i=0; i<ARRAY_SIZE; i++){
        printf("%f \n", h_out[i]);
    }

    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
32777ecc7c2530d42076d81706d12c122dbfc0cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <mpi.h> #include <nvshmem.h> #include <nvshmemx.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #ifdef HAVE_CUB #include <hipcub/hipcub.hpp> #endif // HAVE_CUB #define MPI_CALL(call) \ { \ int mpi_status = call; \ if (0 != mpi_status) { \ char mpi_error_string[MPI_MAX_ERROR_STRING]; \ int mpi_error_string_length = 0; \ MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \ if (NULL != mpi_error_string) \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %s " \ "(%d).\n", \ #call, __LINE__, __FILE__, mpi_error_string, mpi_status); \ else \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %d.\n", \ #call, __LINE__, __FILE__, mpi_status); \ } \ } #ifdef USE_NVTX #include <roctracer/roctx.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE roctxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } // convert NVSHMEM_SYMMETRIC_SIZE string to long long unsigned int long long unsigned int 
parse_nvshmem_symmetric_size(char *value) { long long unsigned int units, size; assert(value != NULL); if (strchr(value, 'G') != NULL) { units=1e9; } else if (strchr(value, 'M') != NULL) { units=1e6; } else if (strchr(value, 'K') != NULL) { units=1e3; } else { units=1; } assert(atof(value) >= 0); size = (long long unsigned int) atof(value) * units; return size; } typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); /* This kernel implements neighborhood synchronization for Jacobi. It updates the neighbor PEs about its arrival and waits for notification from them. */ __global__ void syncneighborhood_kernel(int my_pe, int num_pes, volatile long* sync_arr, long counter) { int next_rank = (my_pe + 1) % num_pes; int prev_rank = (my_pe == 0) ? num_pes - 1 : my_pe - 1; nvshmem_quiet(); /* To ensure all prior nvshmem operations have been completed */ /* Notify neighbors about arrival */ nvshmem_long_p((long*)sync_arr, counter, next_rank); nvshmem_long_p((long*)sync_arr + 1, counter, prev_rank); /* Wait for neighbors notification */ while (counter > *(sync_arr)) ; while (counter > *(sync_arr + 1)) ; } __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[(iy + 1) * nx + 0] = y0; a[(iy + 1) * nx + (nx - 1)] = y0; a_new[(iy + 1) * nx + 0] = y0; a_new[(iy + 1) * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, const int top_pe, const int top_iy, const int bottom_pe, const int bottom_iy) { #ifdef HAVE_CUB typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } /* starting (x, y) coordinate of the block */ int block_iy = iy - threadIdx.y; /* Alternatively, block_iy = blockIdx.y * blockDim.y + iy_start */ int block_ix = ix - threadIdx.x; /* Alternatively, block_ix = blockIdx.x * blockDim.x + 1 */ /* Communicate the boundaries */ if ((block_iy <= iy_start) && (iy_start < block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + top_iy * nx + block_ix, a_new + iy_start * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), top_pe); } if ((block_iy < iy_end) && (iy_end <= block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + bottom_iy * nx + block_ix, a_new + (iy_end - 1) * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), bottom_pe); } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const 
bool print, int mype); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { hipEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 7168); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const bool csv = get_arg(argv, argv + argc, "-csv"); if (nccheck != 1) { fprintf(stderr, "Only nccheck=1 is supported\n"); return -1; } real* a_new; real* a_ref_h; real* a_h; double runtime_serial = 0.0; real l2_norms[2]; int rank = 0, size = 1; MPI_CALL(MPI_Init(&argc, &argv)); MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size)); int num_devices; CUDA_RT_CALL(hipGetDeviceCount(&num_devices)); int local_rank = -1, local_size = 1; { MPI_Comm local_comm; MPI_Info info; MPI_CALL(MPI_Info_create(&info)); MPI_CALL( MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, info, &local_comm)); MPI_CALL(MPI_Comm_rank(local_comm, &local_rank)); MPI_CALL(MPI_Comm_size(local_comm, &local_size)); if (num_devices < local_size) { fprintf(stderr, "ERROR: Number of devices is less numer of PEs \ on the node!\n"); MPI_CALL(MPI_Comm_free(&local_comm)); MPI_CALL(MPI_Info_free(&info)); MPI_CALL(MPI_Finalize()); return -1; } MPI_CALL(MPI_Comm_free(&local_comm)); MPI_CALL(MPI_Info_free(&info)); } CUDA_RT_CALL(hipSetDevice(local_rank)); CUDA_RT_CALL(hipFree(0)); MPI_Comm mpi_comm; nvshmemx_init_attr_t attr; mpi_comm = MPI_COMM_WORLD; attr.mpi_comm = &mpi_comm; // Set symmetric heap size for nvshmem based on problem size // Its default value in nvshmem is 1 GB which is not sufficient // for large mesh sizes long long unsigned int mesh_size_per_rank = nx * (((ny - 2) + size - 1) / size + 2); long long unsigned int required_symmetric_heap_size = 2 * mesh_size_per_rank * sizeof(real) * 1.1; // Factor 2 is because 2 arrays are allocated - a and a_new // 1.1 factor is just for alignment or other usage char * value = getenv("NVSHMEM_SYMMETRIC_SIZE"); if (value) { /* env variable is set */ long long unsigned int size_env = parse_nvshmem_symmetric_size(value); if (size_env < required_symmetric_heap_size) { fprintf(stderr, "ERROR: Minimum NVSHMEM_SYMMETRIC_SIZE = %lluB, Current NVSHMEM_SYMMETRIC_SIZE = %s\n", required_symmetric_heap_size, value); MPI_CALL(MPI_Finalize()); return -1; } } else { char symmetric_heap_size_str[100]; sprintf(symmetric_heap_size_str, "%llu", required_symmetric_heap_size); if (!rank && !csv) printf("Setting environment variable NVSHMEM_SYMMETRIC_SIZE = %llu\n", required_symmetric_heap_size); setenv("NVSHMEM_SYMMETRIC_SIZE", symmetric_heap_size_str, 1); } nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); int npes = nvshmem_n_pes(); int mype = nvshmem_my_pe(); nvshmem_barrier_all(); bool result_correct = true; real* a; hipStream_t compute_stream; hipStream_t reset_l2_norm_stream; hipEvent_t compute_done[2]; hipEvent_t reset_l2_norm_done[2]; l2_norm_buf l2_norm_bufs[2]; CUDA_RT_CALL(hipHostMalloc(&a_ref_h, nx * ny * sizeof(real))); 
CUDA_RT_CALL(hipHostMalloc(&a_h, nx * ny * sizeof(real))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv && (0 == mype), mype); nvshmem_barrier_all(); // ny - 2 rows are distributed amongst `size` ranks in such a way // that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows. // This optimizes load balancing when (ny - 2) % size != 0 int chunk_size; int chunk_size_low = (ny - 2) / npes; int chunk_size_high = chunk_size_low + 1; // To calculate the number of ranks that need to compute an extra row, // the following formula is derived from this equation: // num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = ny - 2 int num_ranks_low = npes * chunk_size_low + npes - (ny - 2); // Number of ranks with chunk_size = chunk_size_low if (mype < num_ranks_low) chunk_size = chunk_size_low; else chunk_size = chunk_size_high; a = (real*)nvshmem_malloc( nx * (chunk_size_high + 2) * sizeof(real)); // Using chunk_size_high so that it is same across all PEs a_new = (real*)nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real)); hipMemset(a, 0, nx * (chunk_size + 2) * sizeof(real)); hipMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(real)); // Calculate local domain boundaries int iy_start_global; // My start index in the global array if (mype < num_ranks_low) { iy_start_global = mype * chunk_size_low + 1; } else { iy_start_global = num_ranks_low * chunk_size_low + (mype - num_ranks_low) * chunk_size_high + 1; } int iy_end_global = iy_start_global + chunk_size - 1; // My last index in the global array // do not process boundaries iy_end_global = ::min(iy_end_global, ny - 4); int iy_start = 1; int iy_end = (iy_end_global - iy_start_global + 1) + iy_start; // calculate boundary indices for top and bottom boundaries int top_pe = mype > 0 ? mype - 1 : (npes - 1); int bottom_pe = (mype + 1) % npes; int iy_end_top = (top_pe < num_ranks_low) ? 
chunk_size_low + 1 : chunk_size_high + 1; int iy_start_bottom = 0; // Set Dirichlet boundary conditions on left and right boundary hipLaunchKernelGGL(( initialize_boundaries), dim3((ny / npes) / 128 + 1), dim3(128), 0, 0, a, a_new, PI, iy_start_global - 1, nx, chunk_size, ny - 2); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreateWithFlags(&compute_stream, hipStreamNonBlocking)); CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done[0], hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done[1], hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[0], hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[1], hipEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming)); CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } nvshmemx_barrier_all_on_stream(compute_stream); MPI_CALL(MPI_Allreduce(l2_norm_bufs[0].h, &l2_norms[0], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); MPI_CALL(MPI_Allreduce(l2_norm_bufs[1].h, &l2_norms[1], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); CUDA_RT_CALL(hipDeviceSynchronize()); if (!mype) { if (!csv) printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, ny, nx); } constexpr int dim_block_x = 1024; constexpr int dim_block_y = 1; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (chunk_size + dim_block_y - 1) / dim_block_y, 1); int iter = 0; if (!mype) { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } nvshmem_barrier_all(); double start = MPI_Wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; /* Used by syncneighborhood kernel */ long* sync_arr = NULL; sync_arr = (long*)nvshmem_malloc(2 * sizeof(long)); hipMemsetAsync(sync_arr, 0, 2 * sizeof(long), compute_stream); hipStreamSynchronize(compute_stream); long synccounter = 1; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>), dim3(dim_grid), dim3(dim_block_x, dim_block_y, 1), 0, compute_stream, a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, top_pe, iy_end_top, bottom_pe, iy_start_bottom); CUDA_RT_CALL(hipGetLastError()); /* Instead of using nvshmemx_barrier_all_on_stream, we are using a custom implementation of barrier that just synchronizes with the neighbor PEs that is the PEs with whom a PE communicates. This will perform faster than a global barrier that would do redundant synchronization for this application.
*/ hipLaunchKernelGGL(( syncneighborhood_kernel), dim3(1), dim3(1), 0, compute_stream, mype, npes, sync_arr, synccounter); synccounter++; // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), hipMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the data for // calculation CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done)); MPI_CALL(MPI_Allreduce(l2_norm_bufs[prev].h, &l2_norms[prev], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (!csv && (iter % 100) == 0) { if (!mype) printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real), hipMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(hipEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); nvshmem_barrier_all(); double stop = MPI_Wtime(); POP_RANGE nvshmem_barrier_all(); CUDA_RT_CALL(hipMemcpy(a_h + iy_start_global * nx, a + nx, ::min(ny - 2 - iy_start_global, chunk_size) * nx * sizeof(real), hipMemcpyDeviceToHost)); result_correct = true; for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR on rank %d: a[%d * %d + %d] = %f does not match %f " "(reference)\n", rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } int global_result_correct = 1; MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD)); result_correct = global_result_correct; if (!mype && result_correct) { if (csv) { printf("nvshmem_opt, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, npes, (stop - start), runtime_serial); } else { printf("Num GPUs: %d.\n", npes); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, npes, (stop - start), runtime_serial / (stop - start), runtime_serial / (npes * (stop - start)) * 100); } } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h)); CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done)); } nvshmem_free(a); nvshmem_free(a_new); nvshmem_free(sync_arr); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(hipEventDestroy(compute_done[1])); CUDA_RT_CALL(hipEventDestroy(compute_done[0])); CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipHostFree(a_h)); CUDA_RT_CALL(hipHostFree(a_ref_h)); nvshmem_finalize(); MPI_CALL(MPI_Finalize()); return (result_correct == 1) ? 
0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print, int mype) { real* a; real* a_new; hipStream_t compute_stream; real* l2_norm_d; real* l2_norm_h; int iy_start = 1; int iy_end = ny - 3; CUDA_RT_CALL(hipMalloc((void**)&a, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMalloc((void**)&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(real))); // Set Dirichlet boundary conditions on left and right border hipLaunchKernelGGL(( initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI, 0, nx, ny - 2, ny - 2); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreate(&compute_stream)); CUDA_RT_CALL(hipMalloc(&l2_norm_d, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_h, sizeof(real))); CUDA_RT_CALL(hipDeviceSynchronize()); if (print) printf( "Single GPU jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 1024; constexpr int dim_block_y = 1; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, ((ny - 2) + dim_block_y - 1) / dim_block_y, 1); int iter = 0; real l2_norm = 1.0; CUDA_RT_CALL(hipDeviceSynchronize()); double start = MPI_Wtime(); PUSH_RANGE("Jacobi solve", 0) while (l2_norm > tol && iter < iter_max) { CUDA_RT_CALL(hipMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>), dim3(dim_grid), dim3(dim_block_x, dim_block_y, 1), 0, compute_stream, a_new, a, l2_norm_d, iy_start, iy_end, nx, mype, iy_end + 1, mype, (iy_start - 1)); CUDA_RT_CALL(hipGetLastError()); if ((iter % nccheck) == 0 || (print && ((iter % 100) == 0))) { CUDA_RT_CALL(hipMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), hipMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(hipStreamSynchronize(compute_stream)); l2_norm = *l2_norm_h; l2_norm = std::sqrt(l2_norm); if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); POP_RANGE double stop = MPI_Wtime(); CUDA_RT_CALL(hipMemcpy(a_ref_h, a, nx * ny * sizeof(real), hipMemcpyDeviceToHost)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipHostFree(l2_norm_h)); CUDA_RT_CALL(hipFree(l2_norm_d)); CUDA_RT_CALL(hipFree(a_new)); CUDA_RT_CALL(hipFree(a)); return (stop - start); }
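Note on the NVSHMEM_SYMMETRIC_SIZE computation in main() above: the heap requirement is the per-rank mesh footprint (the interior rows split across PEs plus two halo rows), doubled for the a/a_new pair and padded by 10%. The following host-only sketch reproduces that arithmetic for one example configuration; the nx/ny values match the program defaults, but the PE count is made up for illustration.

#include <cstdio>

int main() {
    typedef float real;                    // matches "typedef float real" in the file
    const long long nx = 7168, ny = 7168;  // default mesh size used by the program
    const int npes = 4;                    // hypothetical number of PEs

    // per-rank rows: interior rows split across PEs, plus two halo rows
    long long unsigned int mesh_size_per_rank = nx * (((ny - 2) + npes - 1) / npes + 2);
    // factor 2 for the a / a_new pair, factor 1.1 as headroom
    long long unsigned int required = 2 * mesh_size_per_rank * sizeof(real) * 1.1;

    printf("mesh_size_per_rank = %llu elements\n", mesh_size_per_rank);
    printf("required NVSHMEM_SYMMETRIC_SIZE >= %llu bytes (about %.0f MB)\n",
           required, required / 1e6);
    return 0;
}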
32777ecc7c2530d42076d81706d12c122dbfc0cc.cu
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <mpi.h> #include <nvshmem.h> #include <nvshmemx.h> #include <algorithm> #include <cassert> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #ifdef HAVE_CUB #include <cub/block/block_reduce.cuh> #endif // HAVE_CUB #define MPI_CALL(call) \ { \ int mpi_status = call; \ if (0 != mpi_status) { \ char mpi_error_string[MPI_MAX_ERROR_STRING]; \ int mpi_error_string_length = 0; \ MPI_Error_string(mpi_status, mpi_error_string, &mpi_error_string_length); \ if (NULL != mpi_error_string) \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %s " \ "(%d).\n", \ #call, __LINE__, __FILE__, mpi_error_string, mpi_status); \ else \ fprintf(stderr, \ "ERROR: MPI call \"%s\" in line %d of file %s failed " \ "with %d.\n", \ #call, __LINE__, __FILE__, mpi_status); \ } \ } #ifdef USE_NVTX #include <nvToolsExt.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE nvtxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } // convert NVSHMEM_SYMMETRIC_SIZE string to long long unsigned int long long unsigned int parse_nvshmem_symmetric_size(char *value) { long long unsigned int units, size; assert(value != NULL); 
if (strchr(value, 'G') != NULL) { units=1e9; } else if (strchr(value, 'M') != NULL) { units=1e6; } else if (strchr(value, 'K') != NULL) { units=1e3; } else { units=1; } assert(atof(value) >= 0); size = (long long unsigned int) atof(value) * units; return size; } typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); /* This kernel implements neighborhood synchronization for Jacobi. It updates the neighbor PEs about its arrival and waits for notification from them. */ __global__ void syncneighborhood_kernel(int my_pe, int num_pes, volatile long* sync_arr, long counter) { int next_rank = (my_pe + 1) % num_pes; int prev_rank = (my_pe == 0) ? num_pes - 1 : my_pe - 1; nvshmem_quiet(); /* To ensure all prior nvshmem operations have been completed */ /* Notify neighbors about arrival */ nvshmem_long_p((long*)sync_arr, counter, next_rank); nvshmem_long_p((long*)sync_arr + 1, counter, prev_rank); /* Wait for neighbors notification */ while (counter > *(sync_arr)) ; while (counter > *(sync_arr + 1)) ; } __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[(iy + 1) * nx + 0] = y0; a[(iy + 1) * nx + (nx - 1)] = y0; a_new[(iy + 1) * nx + 0] = y0; a_new[(iy + 1) * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, const int top_pe, const int top_iy, const int bottom_pe, const int bottom_iy) { #ifdef HAVE_CUB typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } /* starting (x, y) coordinate of the block */ int block_iy = iy - threadIdx.y; /* Alternatively, block_iy = blockIdx.y * blockDim.y + iy_start */ int block_ix = ix - threadIdx.x; /* Alternatively, block_ix = blockIdx.x * blockDim.x + 1 */ /* Communicate the boundaries */ if ((block_iy <= iy_start) && (iy_start < block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + top_iy * nx + block_ix, a_new + iy_start * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), top_pe); } if ((block_iy < iy_end) && (iy_end <= block_iy + blockDim.y)) { nvshmemx_float_put_nbi_block(a_new + bottom_iy * nx + block_ix, a_new + (iy_end - 1) * nx + block_ix, min(blockDim.x, nx - 1 - block_ix), bottom_pe); } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print, int mype); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, 
const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { cudaEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 7168); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const bool csv = get_arg(argv, argv + argc, "-csv"); if (nccheck != 1) { fprintf(stderr, "Only nccheck=1 is supported\n"); return -1; } real* a_new; real* a_ref_h; real* a_h; double runtime_serial = 0.0; real l2_norms[2]; int rank = 0, size = 1; MPI_CALL(MPI_Init(&argc, &argv)); MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &rank)); MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &size)); int num_devices; CUDA_RT_CALL(cudaGetDeviceCount(&num_devices)); int local_rank = -1, local_size = 1; { MPI_Comm local_comm; MPI_Info info; MPI_CALL(MPI_Info_create(&info)); MPI_CALL( MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, rank, info, &local_comm)); MPI_CALL(MPI_Comm_rank(local_comm, &local_rank)); MPI_CALL(MPI_Comm_size(local_comm, &local_size)); if (num_devices < local_size) { fprintf(stderr, "ERROR: Number of devices is less numer of PEs \ on the node!\n"); MPI_CALL(MPI_Comm_free(&local_comm)); MPI_CALL(MPI_Info_free(&info)); MPI_CALL(MPI_Finalize()); return -1; } MPI_CALL(MPI_Comm_free(&local_comm)); MPI_CALL(MPI_Info_free(&info)); } CUDA_RT_CALL(cudaSetDevice(local_rank)); CUDA_RT_CALL(cudaFree(0)); MPI_Comm mpi_comm; nvshmemx_init_attr_t attr; mpi_comm = MPI_COMM_WORLD; attr.mpi_comm = &mpi_comm; // Set symmetric heap size for nvshmem based on problem size // Its default value in nvshmem is 1 GB which is not sufficient // for large mesh sizes long long unsigned int mesh_size_per_rank = nx * (((ny - 2) + size - 1) / size + 2); long long unsigned int required_symmetric_heap_size = 2 * mesh_size_per_rank * sizeof(real) * 1.1; // Factor 2 is because 2 arrays are allocated - a and a_new // 1.1 factor is just for alignment or other usage char * value = getenv("NVSHMEM_SYMMETRIC_SIZE"); if (value) { /* env variable is set */ long long unsigned int size_env = parse_nvshmem_symmetric_size(value); if (size_env < required_symmetric_heap_size) { fprintf(stderr, "ERROR: Minimum NVSHMEM_SYMMETRIC_SIZE = %lluB, Current NVSHMEM_SYMMETRIC_SIZE = %s\n", required_symmetric_heap_size, value); MPI_CALL(MPI_Finalize()); return -1; } } else { char symmetric_heap_size_str[100]; sprintf(symmetric_heap_size_str, "%llu", required_symmetric_heap_size); if (!rank && !csv) printf("Setting environment variable NVSHMEM_SYMMETRIC_SIZE = %llu\n", required_symmetric_heap_size); setenv("NVSHMEM_SYMMETRIC_SIZE", symmetric_heap_size_str, 1); } nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr); int npes = nvshmem_n_pes(); int mype = nvshmem_my_pe(); nvshmem_barrier_all(); bool result_correct = true; real* a; cudaStream_t compute_stream; cudaStream_t reset_l2_norm_stream; cudaEvent_t compute_done[2]; cudaEvent_t reset_l2_norm_done[2]; l2_norm_buf l2_norm_bufs[2]; CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(real))); runtime_serial = single_gpu(nx, ny, 
iter_max, a_ref_h, nccheck, !csv && (0 == mype), mype); nvshmem_barrier_all(); // ny - 2 rows are distributed amongst `size` ranks in such a way // that each rank gets either (ny - 2) / size or (ny - 2) / size + 1 rows. // This optimizes load balancing when (ny - 2) % size != 0 int chunk_size; int chunk_size_low = (ny - 2) / npes; int chunk_size_high = chunk_size_low + 1; // To calculate the number of ranks that need to compute an extra row, // the following formula is derived from this equation: // num_ranks_low * chunk_size_low + (size - num_ranks_low) * (chunk_size_low + 1) = ny - 2 int num_ranks_low = npes * chunk_size_low + npes - (ny - 2); // Number of ranks with chunk_size = chunk_size_low if (mype < num_ranks_low) chunk_size = chunk_size_low; else chunk_size = chunk_size_high; a = (real*)nvshmem_malloc( nx * (chunk_size_high + 2) * sizeof(real)); // Using chunk_size_high so that it is same across all PEs a_new = (real*)nvshmem_malloc(nx * (chunk_size_high + 2) * sizeof(real)); cudaMemset(a, 0, nx * (chunk_size + 2) * sizeof(real)); cudaMemset(a_new, 0, nx * (chunk_size + 2) * sizeof(real)); // Calculate local domain boundaries int iy_start_global; // My start index in the global array if (mype < num_ranks_low) { iy_start_global = mype * chunk_size_low + 1; } else { iy_start_global = num_ranks_low * chunk_size_low + (mype - num_ranks_low) * chunk_size_high + 1; } int iy_end_global = iy_start_global + chunk_size - 1; // My last index in the global array // do not process boundaries iy_end_global = std::min(iy_end_global, ny - 4); int iy_start = 1; int iy_end = (iy_end_global - iy_start_global + 1) + iy_start; // calculate boundary indices for top and bottom boundaries int top_pe = mype > 0 ? mype - 1 : (npes - 1); int bottom_pe = (mype + 1) % npes; int iy_end_top = (top_pe < num_ranks_low) ? 
chunk_size_low + 1 : chunk_size_high + 1; int iy_start_bottom = 0; // Set diriclet boundary conditions on left and right boundary initialize_boundaries<<<(ny / npes) / 128 + 1, 128>>>(a, a_new, PI, iy_start_global - 1, nx, chunk_size, ny - 2); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreateWithFlags(&compute_stream, cudaStreamNonBlocking)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done[0], cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done[1], cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[0], cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[1], cudaEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } nvshmemx_barrier_all_on_stream(compute_stream); MPI_CALL(MPI_Allreduce(l2_norm_bufs[0].h, &l2_norms[0], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); MPI_CALL(MPI_Allreduce(l2_norm_bufs[1].h, &l2_norms[1], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); CUDA_RT_CALL(cudaDeviceSynchronize()); if (!mype) { if (!csv) printf("Jacobi relaxation: %d iterations on %d x %d mesh\n", iter_max, ny, nx); } constexpr int dim_block_x = 1024; constexpr int dim_block_y = 1; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (chunk_size + dim_block_y - 1) / dim_block_y, 1); int iter = 0; if (!mype) { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } nvshmem_barrier_all(); double start = MPI_Wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; /* Used by syncneighborhood kernel */ long* sync_arr = NULL; sync_arr = (long*)nvshmem_malloc(2 * sizeof(long)); cudaMemsetAsync(sync_arr, 0, 2 * sizeof(long), compute_stream); cudaStreamSynchronize(compute_stream); long synccounter = 1; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, top_pe, iy_end_top, bottom_pe, iy_start_bottom); CUDA_RT_CALL(cudaGetLastError()); /* Instead of using nvshmemx_barrier_all_on_stream, we are using a custom implementation of barrier that just synchronizes with the neighbor PEs that is the PEs with whom a PE communicates. This will perform faster than a global barrier that would do redundant synchronization for this application. 
*/ syncneighborhood_kernel<<<1, 1, 0, compute_stream>>>(mype, npes, sync_arr, synccounter); synccounter++; // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), cudaMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the data for // calculation CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); MPI_CALL(MPI_Allreduce(l2_norm_bufs[prev].h, &l2_norms[prev], 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD)); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (!csv && (iter % 100) == 0) { if (!mype) printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real), cudaMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); nvshmem_barrier_all(); double stop = MPI_Wtime(); POP_RANGE nvshmem_barrier_all(); CUDA_RT_CALL(cudaMemcpy(a_h + iy_start_global * nx, a + nx, std::min(ny - 2 - iy_start_global, chunk_size) * nx * sizeof(real), cudaMemcpyDeviceToHost)); result_correct = true; for (int iy = iy_start_global; result_correct && (iy < iy_end_global); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR on rank %d: a[%d * %d + %d] = %f does not match %f " "(reference)\n", rank, iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); result_correct = false; } } } int global_result_correct = 1; MPI_CALL(MPI_Allreduce(&result_correct, &global_result_correct, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD)); result_correct = global_result_correct; if (!mype && result_correct) { if (csv) { printf("nvshmem_opt, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, npes, (stop - start), runtime_serial); } else { printf("Num GPUs: %d.\n", npes); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, npes, (stop - start), runtime_serial / (stop - start), runtime_serial / (npes * (stop - start)) * 100); } } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } nvshmem_free(a); nvshmem_free(a_new); nvshmem_free(sync_arr); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(cudaEventDestroy(compute_done[1])); CUDA_RT_CALL(cudaEventDestroy(compute_done[0])); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFreeHost(a_h)); CUDA_RT_CALL(cudaFreeHost(a_ref_h)); nvshmem_finalize(); MPI_CALL(MPI_Finalize()); return (result_correct == 1) ? 
0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print, int mype) { real* a; real* a_new; cudaStream_t compute_stream; real* l2_norm_d; real* l2_norm_h; int iy_start = 1; int iy_end = ny - 3; CUDA_RT_CALL(cudaMalloc((void**)&a, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMalloc((void**)&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny - 2, ny - 2); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreate(&compute_stream)); CUDA_RT_CALL(cudaMalloc(&l2_norm_d, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_h, sizeof(real))); CUDA_RT_CALL(cudaDeviceSynchronize()); if (print) printf( "Single GPU jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 1024; constexpr int dim_block_y = 1; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, ((ny - 2) + dim_block_y - 1) / dim_block_y, 1); int iter = 0; real l2_norm = 1.0; CUDA_RT_CALL(cudaDeviceSynchronize()); double start = MPI_Wtime(); PUSH_RANGE("Jacobi solve", 0) while (l2_norm > tol && iter < iter_max) { CUDA_RT_CALL(cudaMemsetAsync(l2_norm_d, 0, sizeof(real), compute_stream)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new, a, l2_norm_d, iy_start, iy_end, nx, mype, iy_end + 1, mype, (iy_start - 1)); CUDA_RT_CALL(cudaGetLastError()); if ((iter % nccheck) == 0 || (print && ((iter % 100) == 0))) { CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_h, l2_norm_d, sizeof(real), cudaMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(cudaStreamSynchronize(compute_stream)); l2_norm = *l2_norm_h; l2_norm = std::sqrt(l2_norm); if (print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); POP_RANGE double stop = MPI_Wtime(); CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(real), cudaMemcpyDeviceToHost)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFreeHost(l2_norm_h)); CUDA_RT_CALL(cudaFree(l2_norm_d)); CUDA_RT_CALL(cudaFree(a_new)); CUDA_RT_CALL(cudaFree(a)); return (stop - start); }
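Both versions above distribute the ny - 2 interior rows so that num_ranks_low ranks take chunk_size_low rows and the remaining ranks take one extra row, with num_ranks_low = npes * chunk_size_low + npes - (ny - 2). The small host-only sketch below checks that identity for a few example sizes; the helper name check_split and the sample sizes are made up for illustration.

#include <cassert>
#include <cstdio>

static void check_split(int ny, int npes) {
    int rows = ny - 2;                        // interior rows to distribute
    int chunk_size_low = rows / npes;
    int chunk_size_high = chunk_size_low + 1;
    // ranks [0, num_ranks_low) get chunk_size_low rows, the rest get one extra
    int num_ranks_low = npes * chunk_size_low + npes - rows;

    long long total = (long long)num_ranks_low * chunk_size_low +
                      (long long)(npes - num_ranks_low) * chunk_size_high;
    assert(total == rows);                    // every interior row is covered exactly once
    printf("ny=%d npes=%d -> %d ranks with %d rows, %d ranks with %d rows\n",
           ny, npes, num_ranks_low, chunk_size_low,
           npes - num_ranks_low, chunk_size_high);
}

int main() {
    check_split(7168, 4);
    check_split(7168, 6);
    check_split(1026, 8);
    return 0;
}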
ea99b37181e6bad160977a1cf5992c291adf5734.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <errno.h> #include <stdint.h> #include <math.h> #include <assert.h> #include <limits.h> #include "c63.h" #include "cuda_util.hcu" #define MX (blockIdx.x * 8) #define MY (blockIdx.y * 8) #define RANGE 16 #define LENGTH 40 #define COMPSAD(i,j); \ minimum[res_index].value = __usad(ref[ref_index + j * 40 + i], orig[j * 8 + i], minimum[res_index].value); \ minimum[16 * 32 + res_index].value = __usad(ref[(16 * 40) + ref_index + j * 40 + i], orig[j * 8 + i], minimum[16 * 32 + res_index].value); struct min_helper { uint16_t value; int8_t x; int8_t y; }; /////////////////////////////////////////////////////////////////////////////// // CUDA KERNELS DEVICE //////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// __shared__ uint32_t ref[LENGTH * LENGTH]; // <= (40) * (40) __shared__ uint32_t orig[64]; __shared__ min_helper minimum[32 * 32]; __device__ inline void load_texture_values(int left, int top, int ref_index) { ref[ref_index] = tex2D(tex_ref, left + TX, top + TY); ref[16 * 40 + ref_index] = tex2D(tex_ref, left + TX, top + 16 + TY); if (TY < 8) { //TODO Fix warp serialization //load vertically the blocks to the right ref[TX * 40 + 32 + TY] = tex2D(tex_ref, left + 32 + TY, top + TX); } else { //load the bottom row int y = TY - 8; ref[(32 + y) * 40 + TX] = tex2D(tex_ref, left + TX, top + 32 + y); } if (TY < 8 && TX < 8) { ref[32 * 40 + 32 + TY * 40 + TX] = tex2D(tex_ref, left + 32 + TX, top + 32 + TY); orig[TY * 8 + TX] = tex2D(tex_orig, MX + TX, MY + TY); } __syncthreads(); } __device__ inline void calculate_usad(int res_index, int ref_index) { for (int j = 0; j < 8; j++) { for (int i = 0; i < 8; i++) { minimum[res_index].value = __usad(ref[ref_index + j * 40 + i], orig[j * 8 + i], minimum[res_index].value); } } for (int j = 0; j < 8; j++) { for (int i = 0; i < 8; i++) { minimum[16 * 32 + res_index].value = __usad(ref[(16 * 40) + ref_index + j * 40 + i], orig[j * 8 + i], minimum[16 * 32 + res_index].value); } } __syncthreads(); } __device__ inline void setup_min(int res_index) { minimum[res_index].x = TX; minimum[res_index].y = TY; minimum[res_index].value = 0; minimum[32 * 16 + res_index].x = TX; minimum[32 * 16 + res_index].y = 16 + TY; minimum[32 * 16 + res_index].value = 0; __syncthreads(); } #define MIN2(a,b) (a.value) > (b.value) ? 
(b) : (a); #define COMPMIN(idx) minimum[res_index] = MIN2(minimum[res_index], minimum[(idx)]); __device__ inline void reduce_min(int res_index) { COMPMIN(16 * 32 + res_index); __syncthreads(); // reduce to 2 block_rows if (TY < 8) COMPMIN(8 * 32 + res_index); __syncthreads(); // reduce to 1 block_row if (TY < 4) COMPMIN(4 * 32 + res_index); __syncthreads(); // reduce to 4 rows if (TY < 2) COMPMIN(2 * 32 + res_index); __syncthreads(); // reduce to 2 rows if (TY == 0) COMPMIN(32 + res_index); // reduce to 1 row, no need to sync anymore, within 1 warp if (TY == 0 && TX < 16) COMPMIN(16 + res_index); // reduce to 16 values if (TY == 0 && TX < 8) COMPMIN(8 + res_index); // reduce to 8 values if (TY == 0 && TX < 4) COMPMIN(4 + res_index); // reduce to 4 values if (TY == 0 && TX < 2) COMPMIN(2 + res_index); // reduce to 2 values if (TY == 0 && TX == 0) COMPMIN(1); // reduce to 1 value __syncthreads(); } /////////////////////////////////////////////////////////////////////////////// // CUDA KERNELS GLOBAL //////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// __global__ void cuda_me_texture(int width, int height, macroblock * mb, uint8_t *prediction, size_t prediction_pitch) { int left = MX - 16; int top = MY - 16; int right = MX + 16; int bottom = MY + 16; if (left < 0) left = 0; if (top < 0) top = 0; if (right > (width - 8)) // Increase search area towards the left if we're out of bounds left += (width - 8) - right; if (bottom > (height - 8)) // Increase search area towards the top if we're out of bounds top += (height - 8) - bottom; int res_index = TY * 32 + TX; int ref_index = TY * 40 + TX; load_texture_values(left, top, ref_index); setup_min(res_index); calculate_usad(res_index, ref_index); reduce_min(res_index); if (TX == 0 && TY == 10) { mb[blockIdx.y * gridDim.x + blockIdx.x].mv_x = minimum[0].x + (left - MX); mb[blockIdx.y * gridDim.x + blockIdx.x].mv_y = minimum[0].y + (top - MY); mb[blockIdx.y * gridDim.x + blockIdx.x].use_mv = 1; } else if (TX < 8 && TY < 8) { prediction[(BY * 8 + TY) * prediction_pitch + (BX * 8) + TX] = ref[(minimum[0].y + TY) * 40 + minimum[0].x + TX]; } } /* Motion estimation */ extern "C" void c63_motion_estimate(struct c63_common *cm, struct cuda_frame *cframe) { hipBindTexture2D(0, &tex_ref, cframe->last_recons->Y, &tex_ref.channelDesc, cm->ypw, cm->yph, cframe->last_recons_pitch[0]); hipBindTexture2D(0, &tex_orig, cframe->image->Y, &tex_orig.channelDesc, cm->ypw, cm->yph, cframe->image_pitch[0]); hipLaunchKernelGGL(( cuda_me_texture), dim3(cframe->me_blockDim_Y), dim3(cframe->me_threadDim), 0, 0, cm->ypw, cm->yph, cframe->mbs[0], cframe->predicted->Y, cframe->predicted_pitch[0]); hipBindTexture2D(0, &tex_ref, cframe->last_recons->U, &tex_ref.channelDesc, cm->upw, cm->uph, cframe->last_recons_pitch[1]); hipBindTexture2D(0, &tex_orig, cframe->image->U, &tex_orig.channelDesc, cm->upw, cm->uph, cframe->image_pitch[1]); hipLaunchKernelGGL(( cuda_me_texture), dim3(cframe->me_blockDim_UV), dim3(cframe->me_threadDim), 0, 0, cm->upw, cm->uph, cframe->mbs[1], cframe->predicted->U, cframe->predicted_pitch[1]); hipBindTexture2D(0, &tex_ref, cframe->last_recons->V, &tex_ref.channelDesc, cm->vpw, cm->vph, cframe->last_recons_pitch[2]); hipBindTexture2D(0, &tex_orig, cframe->image->V, &tex_orig.channelDesc, cm->vpw, cm->vph, cframe->image_pitch[2]); hipLaunchKernelGGL(( cuda_me_texture), dim3(cframe->me_blockDim_UV), dim3(cframe->me_threadDim), 0, 0, cm->vpw, cm->vph, cframe->mbs[2], 
cframe->predicted->V, cframe->predicted_pitch[2]); }
ea99b37181e6bad160977a1cf5992c291adf5734.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <errno.h> #include <stdint.h> #include <math.h> #include <assert.h> #include <limits.h> #include "c63.h" #include "cuda_util.hcu" #define MX (blockIdx.x * 8) #define MY (blockIdx.y * 8) #define RANGE 16 #define LENGTH 40 #define COMPSAD(i,j); \ minimum[res_index].value = __usad(ref[ref_index + j * 40 + i], orig[j * 8 + i], minimum[res_index].value); \ minimum[16 * 32 + res_index].value = __usad(ref[(16 * 40) + ref_index + j * 40 + i], orig[j * 8 + i], minimum[16 * 32 + res_index].value); struct min_helper { uint16_t value; int8_t x; int8_t y; }; /////////////////////////////////////////////////////////////////////////////// // CUDA KERNELS DEVICE //////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// __shared__ uint32_t ref[LENGTH * LENGTH]; // <= (40) * (40) __shared__ uint32_t orig[64]; __shared__ min_helper minimum[32 * 32]; __device__ inline void load_texture_values(int left, int top, int ref_index) { ref[ref_index] = tex2D(tex_ref, left + TX, top + TY); ref[16 * 40 + ref_index] = tex2D(tex_ref, left + TX, top + 16 + TY); if (TY < 8) { //TODO Fix warp serialization //load vertically the blocks to the right ref[TX * 40 + 32 + TY] = tex2D(tex_ref, left + 32 + TY, top + TX); } else { //load the bottom row int y = TY - 8; ref[(32 + y) * 40 + TX] = tex2D(tex_ref, left + TX, top + 32 + y); } if (TY < 8 && TX < 8) { ref[32 * 40 + 32 + TY * 40 + TX] = tex2D(tex_ref, left + 32 + TX, top + 32 + TY); orig[TY * 8 + TX] = tex2D(tex_orig, MX + TX, MY + TY); } __syncthreads(); } __device__ inline void calculate_usad(int res_index, int ref_index) { for (int j = 0; j < 8; j++) { for (int i = 0; i < 8; i++) { minimum[res_index].value = __usad(ref[ref_index + j * 40 + i], orig[j * 8 + i], minimum[res_index].value); } } for (int j = 0; j < 8; j++) { for (int i = 0; i < 8; i++) { minimum[16 * 32 + res_index].value = __usad(ref[(16 * 40) + ref_index + j * 40 + i], orig[j * 8 + i], minimum[16 * 32 + res_index].value); } } __syncthreads(); } __device__ inline void setup_min(int res_index) { minimum[res_index].x = TX; minimum[res_index].y = TY; minimum[res_index].value = 0; minimum[32 * 16 + res_index].x = TX; minimum[32 * 16 + res_index].y = 16 + TY; minimum[32 * 16 + res_index].value = 0; __syncthreads(); } #define MIN2(a,b) (a.value) > (b.value) ? 
(b) : (a); #define COMPMIN(idx) minimum[res_index] = MIN2(minimum[res_index], minimum[(idx)]); __device__ inline void reduce_min(int res_index) { COMPMIN(16 * 32 + res_index); __syncthreads(); // reduce to 2 block_rows if (TY < 8) COMPMIN(8 * 32 + res_index); __syncthreads(); // reduce to 1 block_row if (TY < 4) COMPMIN(4 * 32 + res_index); __syncthreads(); // reduce to 4 rows if (TY < 2) COMPMIN(2 * 32 + res_index); __syncthreads(); // reduce to 2 rows if (TY == 0) COMPMIN(32 + res_index); // reduce to 1 row, no need to sync anymore, within 1 warp if (TY == 0 && TX < 16) COMPMIN(16 + res_index); // reduce to 16 values if (TY == 0 && TX < 8) COMPMIN(8 + res_index); // reduce to 8 values if (TY == 0 && TX < 4) COMPMIN(4 + res_index); // reduce to 4 values if (TY == 0 && TX < 2) COMPMIN(2 + res_index); // reduce to 2 values if (TY == 0 && TX == 0) COMPMIN(1); // reduce to 1 value __syncthreads(); } /////////////////////////////////////////////////////////////////////////////// // CUDA KERNELS GLOBAL //////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// __global__ void cuda_me_texture(int width, int height, macroblock * mb, uint8_t *prediction, size_t prediction_pitch) { int left = MX - 16; int top = MY - 16; int right = MX + 16; int bottom = MY + 16; if (left < 0) left = 0; if (top < 0) top = 0; if (right > (width - 8)) // Increase search area towards the left if we're out of bounds left += (width - 8) - right; if (bottom > (height - 8)) // Increase search area towards the top if we're out of bounds top += (height - 8) - bottom; int res_index = TY * 32 + TX; int ref_index = TY * 40 + TX; load_texture_values(left, top, ref_index); setup_min(res_index); calculate_usad(res_index, ref_index); reduce_min(res_index); if (TX == 0 && TY == 10) { mb[blockIdx.y * gridDim.x + blockIdx.x].mv_x = minimum[0].x + (left - MX); mb[blockIdx.y * gridDim.x + blockIdx.x].mv_y = minimum[0].y + (top - MY); mb[blockIdx.y * gridDim.x + blockIdx.x].use_mv = 1; } else if (TX < 8 && TY < 8) { prediction[(BY * 8 + TY) * prediction_pitch + (BX * 8) + TX] = ref[(minimum[0].y + TY) * 40 + minimum[0].x + TX]; } } /* Motion estimation */ extern "C" void c63_motion_estimate(struct c63_common *cm, struct cuda_frame *cframe) { cudaBindTexture2D(0, &tex_ref, cframe->last_recons->Y, &tex_ref.channelDesc, cm->ypw, cm->yph, cframe->last_recons_pitch[0]); cudaBindTexture2D(0, &tex_orig, cframe->image->Y, &tex_orig.channelDesc, cm->ypw, cm->yph, cframe->image_pitch[0]); cuda_me_texture<<<cframe->me_blockDim_Y, cframe->me_threadDim>>>(cm->ypw, cm->yph, cframe->mbs[0], cframe->predicted->Y, cframe->predicted_pitch[0]); cudaBindTexture2D(0, &tex_ref, cframe->last_recons->U, &tex_ref.channelDesc, cm->upw, cm->uph, cframe->last_recons_pitch[1]); cudaBindTexture2D(0, &tex_orig, cframe->image->U, &tex_orig.channelDesc, cm->upw, cm->uph, cframe->image_pitch[1]); cuda_me_texture<<<cframe->me_blockDim_UV, cframe->me_threadDim>>>(cm->upw, cm->uph, cframe->mbs[1], cframe->predicted->U, cframe->predicted_pitch[1]); cudaBindTexture2D(0, &tex_ref, cframe->last_recons->V, &tex_ref.channelDesc, cm->vpw, cm->vph, cframe->last_recons_pitch[2]); cudaBindTexture2D(0, &tex_orig, cframe->image->V, &tex_orig.channelDesc, cm->vpw, cm->vph, cframe->image_pitch[2]); cuda_me_texture<<<cframe->me_blockDim_UV, cframe->me_threadDim>>>(cm->vpw, cm->vph, cframe->mbs[2], cframe->predicted->V, cframe->predicted_pitch[2]); }
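The kernels above accumulate sums of absolute differences with the __usad intrinsic, where __usad(a, b, acc) returns |a - b| + acc. Below is a minimal stand-alone CUDA sketch of a single 8x8 SAD checked against a CPU loop; the buffer names and the one-thread launch are illustrative only and do not mirror the shared-memory tiling used by cuda_me_texture.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void sad8x8(const unsigned int *ref, const unsigned int *orig,
                       unsigned int *out) {
    // one thread computes one SAD, mirroring the inner loops of calculate_usad()
    unsigned int sad = 0;
    for (int j = 0; j < 8; j++)
        for (int i = 0; i < 8; i++)
            sad = __usad(ref[j * 8 + i], orig[j * 8 + i], sad);  // |ref - orig| + sad
    *out = sad;
}

int main() {
    unsigned int h_ref[64], h_orig[64], h_out = 0, cpu = 0;
    for (int k = 0; k < 64; k++) {
        h_ref[k] = rand() % 256;
        h_orig[k] = rand() % 256;
        cpu += (h_ref[k] > h_orig[k]) ? h_ref[k] - h_orig[k] : h_orig[k] - h_ref[k];
    }
    unsigned int *d_ref, *d_orig, *d_out;
    cudaMalloc(&d_ref, sizeof(h_ref));
    cudaMalloc(&d_orig, sizeof(h_orig));
    cudaMalloc(&d_out, sizeof(unsigned int));
    cudaMemcpy(d_ref, h_ref, sizeof(h_ref), cudaMemcpyHostToDevice);
    cudaMemcpy(d_orig, h_orig, sizeof(h_orig), cudaMemcpyHostToDevice);
    sad8x8<<<1, 1>>>(d_ref, d_orig, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("GPU SAD = %u, CPU SAD = %u\n", h_out, cpu);
    cudaFree(d_ref); cudaFree(d_orig); cudaFree(d_out);
    return 0;
}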
3a1213da78476c0b466ab4eb39c4044a878a3965.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "smallsieve.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> typedef unsigned __int64 uint64; typedef unsigned __int32 uint32; const uint64 pow2_32 = 4294967296; const uint32 threads = 256; const uint32 blocks = 8; const uint64 segsize = pow2_32 / (threads*blocks); ///////////////////////////KERNELS START/////////////////////////// //Checks if x is prime; if bit corresponding to x is 0, then return true. __device__ bool isPrime(uint64 *mark, uint64 x) { return (mark[x / 128] & ((uint64)1 << ((x >> 1) & 63))) ? 0 : 1; } //Set the bit corresponding to x __device__ void bitSet(uint64 *mark, uint64 x) { mark[x / 128] |= ((uint64)1 << ((x >> 1) & 63)); } __global__ void SieveBlock(uint32 *P, uint32 completed, uint32 plen) { //Each thread sieves [(pow2_32 >> 1) / threads] elements of the current block uint64 mark[(segsize / 128) + 1]; uint64 id, i, j, minb, min, max, prime, temp1, temp2; id = blockIdx.x*blockDim.x + threadIdx.x; min = (completed*pow2_32) + (id*segsize) + 1; max = min + segsize - 2; uint64 max_sieved = 0; bool max_set = false; for (i = 0;((uint64)P[i] * (uint64)P[i] <= max) && (i<plen);i++) { prime = P[i]; minb = ((min / prime) * prime); if (minb < min) minb += prime; if (~minb & 1) minb += prime; for (j = minb;j <= max;j += (prime << 1)) { bitSet(mark, j - min + 1); } if (j - (prime << 1) > max_sieved) max_sieved = j - (prime << 1); } for (j = max; j >= min; j -= 2) { if (isPrime(mark, j - min + 1)) { printf("Kernel %llu: %llu|%llu|%llu|%d\n", id , j, max_sieved, max, max_set); break; } } } ////////////////////////////KERNELS END//////////////////////////// // SEGMENTED SIEVE // n RAM Time // E07 552KB 0.026s // E08 620KB 0.206s // E09 704KB 1.895s // E10 668KB 20.02s // E11 904KB 205.2s //PARALLEL SEGMENTED SIEVE // n RAM Time // E10 // E11 //Stats logged via Visual Studio Performance Profiler on i7 4790K @4.00GHz w/ 16GB DDR3 RAM and GTX 1070Ti //Driver function int main(uint32 argc, char* argv[]) { //Range: Data-type dependent uint64 n, m; printf("Enter n: "); scanf("%llu", &n); bool smallsieve = false; //Use serial sieve for n<2^32 if (n <= pow2_32) { smallsieve = true; printf("Rounded %llu to ", n); m = (uint64)sqrt(n); n = m * m; printf("%llu\n", n); } else if (n % pow2_32 > 0) //If n>2^32 then round n to nearest multiple of 2^32 { printf("Rounded %llu to ", n); n = ((n / pow2_32) + 1) * pow2_32; printf("%llu\n", n); m = (uint64)(sqrt(n)); } uint32 plen = 0; uint32 *P = NULL; if (~n & 1) n--; if (~m & 1) m--; P = segboolsieve(n, m, plen, smallsieve); if (P == NULL) { printf("Memory Allocation Failure!\n"); exit(0); } else printf("Last prime in utility sieve: %u @ index [%u]\n", P[plen - 1], plen - 1); if (smallsieve) { free(P); return 0; } uint32 chunkcount = (uint32)((n + 1) / pow2_32); //No. 
of chunks uint32 completed = 1; printf("\n%u chunk(s) for [%llu->%llu]\n", chunkcount - 1, pow2_32 + 1, n); uint32 *dP; //Log execution time float GPU_TIME = 0.0; float temp_t; //CUDA Malloc hipMalloc(&dP, (plen + 1) * (size)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); dim3 TPB(threads, 1, 1); dim3 BPG(blocks, 1, 1); hipMemcpy(dP, P, plen * size, hipMemcpyHostToDevice); while (completed < chunkcount) { hipEventRecord(start); SieveBlock << <BPG, TPB >> > (dP, completed, plen); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&temp_t, start, stop); GPU_TIME += temp_t; completed++; } free(P); hipFree(dP); GPU_TIME /= 1000; printf("COMPUTE-PHASE GPU Time: %0.3f seconds\n", GPU_TIME); return 0; }
3a1213da78476c0b466ab4eb39c4044a878a3965.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "smallsieve.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> typedef unsigned __int64 uint64; typedef unsigned __int32 uint32; const uint64 pow2_32 = 4294967296; const uint32 threads = 256; const uint32 blocks = 8; const uint64 segsize = pow2_32 / (threads*blocks); ///////////////////////////KERNELS START/////////////////////////// //Checks if x is prime; if bit corresponding to x is 0, then return true. __device__ bool isPrime(uint64 *mark, uint64 x) { return (mark[x / 128] & ((uint64)1 << ((x >> 1) & 63))) ? 0 : 1; } //Set the bit corresponding to x __device__ void bitSet(uint64 *mark, uint64 x) { mark[x / 128] |= ((uint64)1 << ((x >> 1) & 63)); } __global__ void SieveBlock(uint32 *P, uint32 completed, uint32 plen) { //Each thread sieves [(pow2_32 >> 1) / threads] elements of the current block uint64 mark[(segsize / 128) + 1]; uint64 id, i, j, minb, min, max, prime, temp1, temp2; id = blockIdx.x*blockDim.x + threadIdx.x; min = (completed*pow2_32) + (id*segsize) + 1; max = min + segsize - 2; uint64 max_sieved = 0; bool max_set = false; for (i = 0;((uint64)P[i] * (uint64)P[i] <= max) && (i<plen);i++) { prime = P[i]; minb = ((min / prime) * prime); if (minb < min) minb += prime; if (~minb & 1) minb += prime; for (j = minb;j <= max;j += (prime << 1)) { bitSet(mark, j - min + 1); } if (j - (prime << 1) > max_sieved) max_sieved = j - (prime << 1); } for (j = max; j >= min; j -= 2) { if (isPrime(mark, j - min + 1)) { printf("Kernel %llu: %llu|%llu|%llu|%d\n", id , j, max_sieved, max, max_set); break; } } } ////////////////////////////KERNELS END//////////////////////////// // SEGMENTED SIEVE // n RAM Time // E07 552KB 0.026s // E08 620KB 0.206s // E09 704KB 1.895s // E10 668KB 20.02s // E11 904KB 205.2s //PARALLEL SEGMENTED SIEVE // n RAM Time // E10 // E11 //Stats logged via Visual Studio Performance Profiler on i7 4790K @4.00GHz w/ 16GB DDR3 RAM and GTX 1070Ti //Driver function int main(uint32 argc, char* argv[]) { //Range: Data-type dependent uint64 n, m; printf("Enter n: "); scanf("%llu", &n); bool smallsieve = false; //Use serial sieve for n<2^32 if (n <= pow2_32) { smallsieve = true; printf("Rounded %llu to ", n); m = (uint64)sqrt(n); n = m * m; printf("%llu\n", n); } else if (n % pow2_32 > 0) //If n>2^32 then round n to nearest multiple of 2^32 { printf("Rounded %llu to ", n); n = ((n / pow2_32) + 1) * pow2_32; printf("%llu\n", n); m = (uint64)(sqrt(n)); } uint32 plen = 0; uint32 *P = NULL; if (~n & 1) n--; if (~m & 1) m--; P = segboolsieve(n, m, plen, smallsieve); if (P == NULL) { printf("Memory Allocation Failure!\n"); exit(0); } else printf("Last prime in utility sieve: %u @ index [%u]\n", P[plen - 1], plen - 1); if (smallsieve) { free(P); return 0; } uint32 chunkcount = (uint32)((n + 1) / pow2_32); //No. 
of chunks uint32 completed = 1; printf("\n%u chunk(s) for [%llu->%llu]\n", chunkcount - 1, pow2_32 + 1, n); uint32 *dP; //Log execution time float GPU_TIME = 0.0; float temp_t; //CUDA Malloc cudaMalloc(&dP, (plen + 1) * (size)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 TPB(threads, 1, 1); dim3 BPG(blocks, 1, 1); cudaMemcpy(dP, P, plen * size, cudaMemcpyHostToDevice); while (completed < chunkcount) { cudaEventRecord(start); SieveBlock << <BPG, TPB >> > (dP, completed, plen); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&temp_t, start, stop); GPU_TIME += temp_t; completed++; } free(P); cudaFree(dP); GPU_TIME /= 1000; printf("COMPUTE-PHASE GPU Time: %0.3f seconds\n", GPU_TIME); return 0; }
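Both versions of the sieve driver time the per-chunk kernel launches by recording an event pair around each launch and summing the cudaEventElapsedTime results, which are returned in milliseconds (hence the final division by 1000). A minimal, self-contained sketch of that accumulation pattern, with a placeholder kernel that is not part of the file:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummyKernel(int *d, int n) {             // placeholder kernel for illustration only
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d[i] += 1;
}

int main(void) {
    const int n = 1 << 20;
    int *d; cudaMalloc(&d, n * sizeof(int)); cudaMemset(d, 0, n * sizeof(int));
    cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop);
    float total_ms = 0.0f, ms = 0.0f;
    for (int chunk = 0; chunk < 4; ++chunk) {            // stands in for the while (completed < chunkcount) loop
        cudaEventRecord(start);
        dummyKernel<<<(n + 255) / 256, 256>>>(d, n);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);                      // wait until the stop event has actually happened
        cudaEventElapsedTime(&ms, start, stop);          // elapsed time between the events, in milliseconds
        total_ms += ms;
    }
    printf("GPU time: %.3f seconds\n", total_ms / 1000.0f);
    cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d);
    return 0;
}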
ebd85856f6efe0a655e25450743b049322aefbe7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Julian Gutierrez * Northeastern University * High Performance Computing * * Sobel Algorithm Implementation * */ #include "sobel.h" #define apron_size 1 #define overlap 2*apron_size /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); exit(-1); } #endif return result; } using namespace std; void modThreshold (unsigned int value){ threshold = value; } /* * Sobel Kernel */ __global__ void sobelAlgorithm(unsigned char *intensity, unsigned char *result, unsigned int imageW, unsigned int imageH, unsigned int threshold){ const int xt_start = max(blockIdx.x*TILE_SIZE, 1); const int yt_start = max(blockIdx.y*TILE_SIZE, 1); const int xt_end = min(xt_start + TILE_SIZE - 1, imageW - 2); const int yt_end = min(yt_start + TILE_SIZE - 1, imageH - 2); const int x = min(xt_start + threadIdx.x, xt_end); const int y = min(yt_start + threadIdx.y, yt_end); const int dataW = TILE_SIZE*gridDim.x; const int location = dataW*y + x; const int var1 = intensity[ dataW * (y-1) + x+1 ] - intensity[ dataW * (y+1) + x-1 ]; const int var2 = intensity[ dataW * (y+1) + x+1 ] - intensity[ dataW * (y-1) + x-1 ]; const int var3 = intensity[ dataW * (y) + x+1 ] - intensity[ dataW * (y) + x-1 ]; const int var4 = intensity[ dataW * (y+1) + x ] - intensity[ dataW * (y-1) + x ]; const int magnitude = (var1+var2+2*var3)*(var1+var2+2*var3) + (var1-var2-2*var4)*(var1-var2-2*var4); result[location] = ( (magnitude > threshold) ? 255 : 0); } unsigned char *sobel(unsigned char *intensity, unsigned int height, unsigned int width){ #if defined(DEBUG) printf("Printing input data\n"); printf("Height: %d\n", height); printf("Width: %d\n", width); #endif int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). 
gpu.size = XSize*YSize; // Allocate arrays in GPU memory #if defined(VERBOSE) printf ("Allocating arrays in GPU memory.\n"); #endif // Zero padding on the boarder of the image /*for (int j = 0; j<height ; j++) { for (int i = width-1; i>-1 ; i--) { intensity[width*j + i + 1] = intensity[width*j + i]; } intensity[width*j] = 0; intensity[width*j + width + 1] = 0; } for (int i = 0; i<width+2 ; i++) { for (int j = height-1; j>-1 ; j--) { intensity[width*(j+1) + i] = intensity[width*j + i]; } intensity[i] = 0; intensity[width*(height+1) + i] = 0; } // for (int i = width+2; i < XSize; i++) { for (int j = 0; j < height+2; j++) { intensity[XSize*j + i] = 0; } }*/ checkCuda(hipMalloc((void**)&gpu.intensity, gpu.size*sizeof(char))); checkCuda(hipMalloc((void**)&gpu.result, gpu.size*sizeof(char))); checkCuda(hipMemset(gpu.result , 0 , gpu.size)); checkCuda(hipMemset(gpu.intensity , 0 , gpu.size)); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(hipMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); #if defined(VERBOSE) printf("Running algorithm on GPU.\n"); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); #if defined(CUDA_TIMING) float Ttime; TIMER_CREATE(Ttime); TIMER_START(Ttime); #endif #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif // Launch modified kernel to begin image segmenation hipLaunchKernelGGL(( sobelAlgorithm), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu.intensity, gpu.result, width, height, threshold); checkCuda(hipDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Modified Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(hipMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(gpu.intensity)); checkCuda(hipFree(gpu.result)); #if defined(CUDA_TIMING) TIMER_END(Ttime); printf("Total GPU Execution Time: %f ms\n", Ttime); #endif return(gpu.resultOnCPU); } unsigned char *sobelWarmup(unsigned char *intensity, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width + 1) / TILE_SIZE); int gridYSize = 1 + ((height + 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). gpu.size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(hipMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(hipMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation hipLaunchKernelGGL(( sobelAlgorithm), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu.intensity, gpu.result, width, height, threshold); checkCuda(hipDeviceSynchronize()); // Retrieve results from the GPU checkCuda(hipMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(gpu.intensity)); checkCuda(hipFree(gpu.result)); return(gpu.resultOnCPU); }
ebd85856f6efe0a655e25450743b049322aefbe7.cu
/* Julian Gutierrez * Northeastern University * High Performance Computing * * Sobel Algorithm Implementation * */ #include "sobel.h" #define apron_size 1 #define overlap 2*apron_size /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(-1); } #endif return result; } using namespace std; void modThreshold (unsigned int value){ threshold = value; } /* * Sobel Kernel */ __global__ void sobelAlgorithm(unsigned char *intensity, unsigned char *result, unsigned int imageW, unsigned int imageH, unsigned int threshold){ const int xt_start = max(blockIdx.x*TILE_SIZE, 1); const int yt_start = max(blockIdx.y*TILE_SIZE, 1); const int xt_end = min(xt_start + TILE_SIZE - 1, imageW - 2); const int yt_end = min(yt_start + TILE_SIZE - 1, imageH - 2); const int x = min(xt_start + threadIdx.x, xt_end); const int y = min(yt_start + threadIdx.y, yt_end); const int dataW = TILE_SIZE*gridDim.x; const int location = dataW*y + x; const int var1 = intensity[ dataW * (y-1) + x+1 ] - intensity[ dataW * (y+1) + x-1 ]; const int var2 = intensity[ dataW * (y+1) + x+1 ] - intensity[ dataW * (y-1) + x-1 ]; const int var3 = intensity[ dataW * (y) + x+1 ] - intensity[ dataW * (y) + x-1 ]; const int var4 = intensity[ dataW * (y+1) + x ] - intensity[ dataW * (y-1) + x ]; const int magnitude = (var1+var2+2*var3)*(var1+var2+2*var3) + (var1-var2-2*var4)*(var1-var2-2*var4); result[location] = ( (magnitude > threshold) ? 255 : 0); } unsigned char *sobel(unsigned char *intensity, unsigned int height, unsigned int width){ #if defined(DEBUG) printf("Printing input data\n"); printf("Height: %d\n", height); printf("Width: %d\n", width); #endif int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). 
gpu.size = XSize*YSize; // Allocate arrays in GPU memory #if defined(VERBOSE) printf ("Allocating arrays in GPU memory.\n"); #endif // Zero padding on the boarder of the image /*for (int j = 0; j<height ; j++) { for (int i = width-1; i>-1 ; i--) { intensity[width*j + i + 1] = intensity[width*j + i]; } intensity[width*j] = 0; intensity[width*j + width + 1] = 0; } for (int i = 0; i<width+2 ; i++) { for (int j = height-1; j>-1 ; j--) { intensity[width*(j+1) + i] = intensity[width*j + i]; } intensity[i] = 0; intensity[width*(height+1) + i] = 0; } // for (int i = width+2; i < XSize; i++) { for (int j = 0; j < height+2; j++) { intensity[XSize*j + i] = 0; } }*/ checkCuda(cudaMalloc((void**)&gpu.intensity, gpu.size*sizeof(char))); checkCuda(cudaMalloc((void**)&gpu.result, gpu.size*sizeof(char))); checkCuda(cudaMemset(gpu.result , 0 , gpu.size)); checkCuda(cudaMemset(gpu.intensity , 0 , gpu.size)); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(cudaMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); #if defined(VERBOSE) printf("Running algorithm on GPU.\n"); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); #if defined(CUDA_TIMING) float Ttime; TIMER_CREATE(Ttime); TIMER_START(Ttime); #endif #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif // Launch modified kernel to begin image segmenation sobelAlgorithm<<<dimGrid, dimBlock>>>(gpu.intensity, gpu.result, width, height, threshold); checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Modified Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(cudaMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(gpu.intensity)); checkCuda(cudaFree(gpu.result)); #if defined(CUDA_TIMING) TIMER_END(Ttime); printf("Total GPU Execution Time: %f ms\n", Ttime); #endif return(gpu.resultOnCPU); } unsigned char *sobelWarmup(unsigned char *intensity, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width + 1) / TILE_SIZE); int gridYSize = 1 + ((height + 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). gpu.size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&gpu.intensity , gpu.size*sizeof(char))); checkCuda(cudaMalloc((void**)&gpu.result , gpu.size*sizeof(char))); // Allocate result array in CPU memory gpu.resultOnCPU = new unsigned char[gpu.size]; checkCuda(cudaMemcpy(gpu.intensity, intensity, gpu.size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_TILE_SIZE, BLOCK_TILE_SIZE); // Launch kernel to begin image segmenation sobelAlgorithm<<<dimGrid, dimBlock>>>(gpu.intensity, gpu.result, width, height, threshold); checkCuda(cudaDeviceSynchronize()); // Retrieve results from the GPU checkCuda(cudaMemcpy(gpu.resultOnCPU, gpu.result, gpu.size*sizeof(char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(gpu.intensity)); checkCuda(cudaFree(gpu.result)); return(gpu.resultOnCPU); }
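The four differences var1..var4 in the Sobel kernel above are an algebraic regrouping of the usual 3x3 Sobel operators: Gx = var1 + var2 + 2*var3, and (var1 - var2 - 2*var4) equals -Gy, so the thresholded quantity is the squared gradient magnitude Gx*Gx + Gy*Gy. A single-pixel CPU reference, assuming a plain row-major image of width w rather than the padded dataW used on the device (the helper name is hypothetical):

// Hypothetical CPU reference for one interior pixel of the Sobel kernel above.
static unsigned char sobel_pixel_ref(const unsigned char *img, int w, int x, int y, unsigned int threshold) {
    int gx = (img[(y-1)*w + x+1] + 2*img[y*w + x+1] + img[(y+1)*w + x+1])
           - (img[(y-1)*w + x-1] + 2*img[y*w + x-1] + img[(y+1)*w + x-1]);
    int gy = (img[(y+1)*w + x-1] + 2*img[(y+1)*w + x] + img[(y+1)*w + x+1])
           - (img[(y-1)*w + x-1] + 2*img[(y-1)*w + x] + img[(y-1)*w + x+1]);
    return (unsigned int)(gx*gx + gy*gy) > threshold ? 255 : 0;   // squared magnitude, no sqrt, as in the kernel
}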
af67b7dca6eb12eef229b703e57bdb3383b03612.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } int main(void) { int num_devices=0; cudaErrChk ( hipGetDeviceCount (&num_devices) ); printf("\n=================================================\n"); printf("The number of device(s) : %d\n", num_devices); printf("=================================================\n\n"); for (int i=0; i<num_devices; i++) { hipDeviceProp_t prop; cudaErrChk ( hipGetDeviceProperties (&prop, i) ); printf ("Device Number: %d\n", i); printf (" Device name: %s\n", prop.name); printf (" Device compute capability: %d.%d\n", prop.major, prop.minor); printf (" Number of SM(s): %d\n", prop.multiProcessorCount); printf (" Memory Clock Rate (GHz): %.2f\n", ((float)prop.memoryClockRate)/1.0e6); printf (" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf (" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf ("\n[Kernel size]\n"); printf (" Maximum size of a grid [%d, %d, %d]\n" , prop.maxGridSize[0], prop.maxGridSize[0], prop.maxGridSize[0]); printf (" Maximum size of a block [%d]\n" , prop.maxThreadsPerBlock); printf ("\n[Shared mem]\n"); printf (" Shared memory size per block :%dKB\n", (int)(prop.sharedMemPerBlock/1.0e3)); } printf("\n=================================================\n\n"); return 0; }
af67b7dca6eb12eef229b703e57bdb3383b03612.cu
#include <cstdio> #define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } int main(void) { int num_devices=0; cudaErrChk ( cudaGetDeviceCount (&num_devices) ); printf("\n=================================================\n"); printf("The number of device(s) : %d\n", num_devices); printf("=================================================\n\n"); for (int i=0; i<num_devices; i++) { cudaDeviceProp prop; cudaErrChk ( cudaGetDeviceProperties (&prop, i) ); printf ("Device Number: %d\n", i); printf (" Device name: %s\n", prop.name); printf (" Device compute capability: %d.%d\n", prop.major, prop.minor); printf (" Number of SM(s): %d\n", prop.multiProcessorCount); printf (" Memory Clock Rate (GHz): %.2f\n", ((float)prop.memoryClockRate)/1.0e6); printf (" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf (" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf ("\n[Kernel size]\n"); printf (" Maximum size of a grid [%d, %d, %d]\n" , prop.maxGridSize[0], prop.maxGridSize[0], prop.maxGridSize[0]); printf (" Maximum size of a block [%d]\n" , prop.maxThreadsPerBlock); printf ("\n[Shared mem]\n"); printf (" Shared memory size per block :%dKB\n", (int)(prop.sharedMemPerBlock/1.0e3)); } printf("\n=================================================\n\n"); return 0; }
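The peak-bandwidth line in both listings uses the usual double-data-rate estimate: memoryClockRate is reported in kHz and memoryBusWidth in bits, so GB/s = 2 * clock_kHz * (bus_bits / 8) / 1e6. A standalone sketch of that arithmetic; the helper name and the sample numbers are illustrative only:

// Hypothetical helper mirroring the bandwidth printf above.
static double peak_bandwidth_gbs(int memory_clock_khz, int memory_bus_bits) {
    return 2.0 * memory_clock_khz * (memory_bus_bits / 8) / 1.0e6;   // DDR memories transfer twice per clock
}
// e.g. peak_bandwidth_gbs(4004000, 256) is about 256.3 GB/s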
1e537bd79d575116ae03fd86d05257778e78b87b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_call.h" #include <hipcub/hipcub.hpp> #include <cub/iterator/counting_input_iterator.cuh> namespace onnxruntime { namespace cuda { template <typename T> __global__ void _Iota( hipcub::CountingInputIterator<T> input, size_t length, T* output) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(idx, length); output[idx] = input[idx]; } template <typename T, typename Tin> __global__ void _GatherGradImpl( const Tin* input, const Tin* indices, const T* grad_output, T* grad_weight, int64_t numel, int64_t input_numel, int64_t param_itrs, int64_t stride) { int idx = blockIdx.x * 4 + threadIdx.y; const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { do { for (int itr = 0; itr < param_itrs; ++itr) { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = itr * input_numel + ((int)input[idx]) * stride; //the offset of the input const int grad_row = (itr * numel + ((int)indices[idx])) * stride; //the offset of the gradient float gradient[SZ]; float weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<float>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<float>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii]; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<T>(weight[ii]); } } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } template <typename T, typename Tin> void GatherGradImpl( const CudaKernel& cuda_kernel, const T* grad_data, const Tin* indices_data, const int64_t num_indices, const int64_t num_weights, const int64_t stride, T* output_data, const int64_t num_inputs, //The number of input elements starting from the gathering dimension const int64_t param_itrs //The size of dimensions of the data before gathering dimension ) { // allocate intermediate buffers auto original_indices = cuda_kernel.template GetScratchBuffer<Tin>(num_indices); // initialize original_indices with [0, num_indices) { const auto blocks_per_grid = CeilDiv(num_indices, GridDim::maxThreadsPerBlock); hipcub::CountingInputIterator<Tin> counting_input(Tin{}); hipLaunchKernelGGL(( _Iota), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, 0, counting_input, num_indices, original_indices.get()); } auto indices_data_sorted = cuda_kernel.template GetScratchBuffer<Tin>(num_indices); auto original_indices_sorted = cuda_kernel.template GetScratchBuffer<Tin>(num_indices); // sort indices and original indices size_t sort_temp_storage_size_bytes = 0; CUDA_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( nullptr, sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); auto sort_temp_storage = cuda_kernel.GetScratchBuffer<void>(sort_temp_storage_size_bytes); CUDA_CALL_THROW(hipcub::DeviceRadixSort::SortPairs( sort_temp_storage.get(), sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), 
original_indices.get(), original_indices_sorted.get(), num_indices)); dim3 block(GPU_WARP_SIZE, 4); dim3 grid(CeilDiv(num_indices, 4), CeilDiv(stride, 128)); hipLaunchKernelGGL(( _GatherGradImpl), dim3(grid), dim3(block), 0, 0, indices_data_sorted.get(), original_indices_sorted.get(), grad_data, output_data, num_indices, num_inputs, param_itrs, stride); } #define SPECIALIZED_GRAD_IMPL2(T) \ template void GatherGradImpl<T, int64_t>( \ const CudaKernel& cuda_kernel, \ const T* grad_data, \ const int64_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); \ template void GatherGradImpl<T, int32_t>( \ const CudaKernel& cuda_kernel, \ const T* grad_data, \ const int32_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); SPECIALIZED_GRAD_IMPL2(float) SPECIALIZED_GRAD_IMPL2(half) } // namespace cuda } // namespace onnxruntime
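The kernel above computes the gradient of a Gather: every gathered row feeds its incoming gradient back into the weight row it was read from, and sorting the indices beforehand lets a single thread own each run of equal indices so the additions do not race. A plain CPU reference of the accumulation, simplified to scalar rows (stride = 1, param_itrs = 1); the helper name is hypothetical:

#include <vector>
#include <cstdint>

// Hypothetical CPU reference: grad_weight[indices[i]] accumulates grad_output[i].
std::vector<float> gather_grad_ref(const std::vector<float> &grad_output,
                                   const std::vector<int64_t> &indices,
                                   int64_t num_weights) {
    std::vector<float> grad_weight(num_weights, 0.0f);
    for (size_t i = 0; i < indices.size(); ++i)
        grad_weight[indices[i]] += grad_output[i];       // duplicate indices accumulate
    return grad_weight;
}
// e.g. indices {1, 1, 3} with grad_output {0.5, 0.25, 2.0} and num_weights 4 gives {0, 0.75, 0, 2.0}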
1e537bd79d575116ae03fd86d05257778e78b87b.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/shared_inc/cuda_call.h" #include <cub/device/device_radix_sort.cuh> #include <cub/iterator/counting_input_iterator.cuh> namespace onnxruntime { namespace cuda { template <typename T> __global__ void _Iota( cub::CountingInputIterator<T> input, size_t length, T* output) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(idx, length); output[idx] = input[idx]; } template <typename T, typename Tin> __global__ void _GatherGradImpl( const Tin* input, const Tin* indices, const T* grad_output, T* grad_weight, int64_t numel, int64_t input_numel, int64_t param_itrs, int64_t stride) { int idx = blockIdx.x * 4 + threadIdx.y; const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { do { for (int itr = 0; itr < param_itrs; ++itr) { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = itr * input_numel + ((int)input[idx]) * stride; //the offset of the input const int grad_row = (itr * numel + ((int)indices[idx])) * stride; //the offset of the gradient float gradient[SZ]; float weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<float>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<float>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii]; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * GPU_WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<T>(weight[ii]); } } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } template <typename T, typename Tin> void GatherGradImpl( const CudaKernel& cuda_kernel, const T* grad_data, const Tin* indices_data, const int64_t num_indices, const int64_t num_weights, const int64_t stride, T* output_data, const int64_t num_inputs, //The number of input elements starting from the gathering dimension const int64_t param_itrs //The size of dimensions of the data before gathering dimension ) { // allocate intermediate buffers auto original_indices = cuda_kernel.template GetScratchBuffer<Tin>(num_indices); // initialize original_indices with [0, num_indices) { const auto blocks_per_grid = CeilDiv(num_indices, GridDim::maxThreadsPerBlock); cub::CountingInputIterator<Tin> counting_input(Tin{}); _Iota<<<blocks_per_grid, GridDim::maxThreadsPerBlock>>>( counting_input, num_indices, original_indices.get()); } auto indices_data_sorted = cuda_kernel.template GetScratchBuffer<Tin>(num_indices); auto original_indices_sorted = cuda_kernel.template GetScratchBuffer<Tin>(num_indices); // sort indices and original indices size_t sort_temp_storage_size_bytes = 0; CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs( nullptr, sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); auto sort_temp_storage = cuda_kernel.GetScratchBuffer<void>(sort_temp_storage_size_bytes); CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs( sort_temp_storage.get(), sort_temp_storage_size_bytes, indices_data, indices_data_sorted.get(), original_indices.get(), original_indices_sorted.get(), num_indices)); dim3 block(GPU_WARP_SIZE, 4); dim3 
grid(CeilDiv(num_indices, 4), CeilDiv(stride, 128)); _GatherGradImpl<<<grid, block>>>( indices_data_sorted.get(), original_indices_sorted.get(), grad_data, output_data, num_indices, num_inputs, param_itrs, stride); } #define SPECIALIZED_GRAD_IMPL2(T) \ template void GatherGradImpl<T, int64_t>( \ const CudaKernel& cuda_kernel, \ const T* grad_data, \ const int64_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); \ template void GatherGradImpl<T, int32_t>( \ const CudaKernel& cuda_kernel, \ const T* grad_data, \ const int32_t* indices_data, \ const int64_t num_indices, \ const int64_t num_weights, \ const int64_t stride, \ T* output_data, \ const int64_t num_inputs, \ const int64_t param_itrs); SPECIALIZED_GRAD_IMPL2(float) SPECIALIZED_GRAD_IMPL2(half) } // namespace cuda } // namespace onnxruntime
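Both listings sort (index, original position) pairs with cub::DeviceRadixSort::SortPairs, which is always invoked twice: a first call with a null temporary-storage pointer only reports the required scratch size, and the second call performs the sort. A minimal self-contained sketch of that two-call pattern (buffer names are illustrative):

#include <cub/device/device_radix_sort.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main(void) {
    const int n = 8;
    int h_keys[n] = {7, 3, 3, 0, 5, 3, 1, 7};
    int h_vals[n] = {0, 1, 2, 3, 4, 5, 6, 7};            // original positions, as in GatherGradImpl
    int *d_keys_in, *d_keys_out, *d_vals_in, *d_vals_out;
    cudaMalloc(&d_keys_in, n * sizeof(int));  cudaMalloc(&d_keys_out, n * sizeof(int));
    cudaMalloc(&d_vals_in, n * sizeof(int));  cudaMalloc(&d_vals_out, n * sizeof(int));
    cudaMemcpy(d_keys_in, h_keys, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_vals_in, h_vals, n * sizeof(int), cudaMemcpyHostToDevice);
    void *d_temp = nullptr; size_t temp_bytes = 0;
    cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_keys_in, d_keys_out, d_vals_in, d_vals_out, n);   // size query only
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_keys_in, d_keys_out, d_vals_in, d_vals_out, n);   // actual sort
    cudaMemcpy(h_keys, d_keys_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_vals, d_vals_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("(%d,%d) ", h_keys[i], h_vals[i]);   // keys ascending, positions carried along
    printf("\n");
    cudaFree(d_keys_in); cudaFree(d_keys_out); cudaFree(d_vals_in); cudaFree(d_vals_out); cudaFree(d_temp);
    return 0;
}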
4220913275bfa44f82dd608354ecce8bfbb59c50.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <cmath> __device__ int position; //index of the largest value __device__ int largest; //value of the largest value int lenString = 593; int maxNumStrings = 1000000; int threshold = 2; // Checks if there are any unmerged tuples (Parallelized) __global__ void anyLeft(int *d_c, int *remaining, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if((d_c[my_id] == 0) && (my_id < size)) { *remaining = 0; } } // Searches for the index of the largest count (Parallelized) __global__ void search(int *d_b, int *d_c, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if((d_c[my_id] == 0) && (d_b[my_id] == largest) && (my_id < size)) { position = my_id; } } // Populates copy_db such that the counts for merged tuple is 0 // but count for unmerged tuples is unchanged (Parallelized) __global__ void populate (int *d_b, int *copy_db, int *d_c, int size, int *left) { int n = 0; *left = 1; // reinitalized to false to check if all strings are merged int my_id = blockDim.x * blockIdx.x + threadIdx.x; if (my_id < size) { n = abs((bool)d_c[my_id] - 1); copy_db[my_id] = d_b[my_id] * n; } } // Reduction-type tree implementation to find largest count (Parallelized) __device__ void cuda_select(int *db, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if(my_id < size) { if(db[2 * my_id] > db[2 * my_id + 1]) db[my_id] = db[2 * my_id]; else db[my_id] = db[2 * my_id + 1]; } } // Loops cuda_select function until largest value is at index 0 __global__ void select(int *db, int size) { int height = (int)ceil(log2((double)size)); int i = 0; for(i = 0; i < height; i++) { size = (int)ceil((double) size/2); cuda_select(db, size); } largest = db[0]; } // Compares target string to all other unmerged strings with lesser count __global__ void compare(char *d_a, int *d_b, int *d_c, int size, int lenString, int threshold) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if (my_id == position) d_c[my_id] = 2; if ((my_id < size) && (d_c[my_id] == 0) && (my_id != position)) { int x, diffs = 0; for (x = 0; x < lenString; x++) { diffs += (bool)(d_a[(lenString*position)+x]^d_a[(my_id*lenString)+x]); if (diffs > threshold) break; } if (diffs <= threshold) { d_b[position] += d_b[my_id]; d_c[my_id] = 1; } } } int main(int argc, char** argv) { char *strings, *d_a; // host and device copy of strings int *counts, *d_b; // host and device copy of counts int *merged, *d_c; // host and device copy of bools int *copy_db; // device copy of counts (counts of merged is 0) char copy[lenString+1]; // intermediate variable to load strings into array from file int numbers; // intermediate variable to load counts into array from file int *any_left, *left; // host and device copies to check if all tuples are merged int size = 0; // keeps track of number of tuples in the file int i = 0; // loop variable int size_string = maxNumStrings*sizeof(char)*(lenString+1); int size_int = maxNumStrings*sizeof(int); // Open the file FILE *fp; fp = fopen("/cluster/home/charliep/courses/cs360/single-linkage-clustering/Iceland2014.trim.contigs.good.unique.good.filter.unique.count.fasta", "r"); // Allocate space for arrays on the host if (!(strings = (char *)malloc(size_string))) { fprintf(stderr, "malloc() FAILED (Block)\n"); exit(0); } if (!(counts = (int*)malloc(size_int))) { fprintf(stderr, "malloc() FAILED (Block)\n"); exit(0); } if (!(merged = (int*)malloc(size_int))) { fprintf(stderr, "malloc() 
FAILED (Block)\n"); exit(0); } any_left = (int *)malloc(sizeof(int)); // Set the values of global variables on the device hipMemset(&position, 0, sizeof(int)); hipMemset(&largest, 0, sizeof(int)); // Load strings and counts into array while(fscanf(fp, "%s %d", copy, &numbers) != EOF && size < 1000){ strcpy(&strings[i], copy); counts[size] = numbers; i = i + lenString; size++; } // Close file fclose(fp); // Allocate space for arrays on the device hipMalloc(&d_a, size_string); hipMalloc(&d_b, size_int); hipMalloc(&d_c, size_int); hipMalloc(&copy_db, size_int); hipMalloc(&left, sizeof(int)); // Copy arrays from host to device hipMemcpy(d_a, strings, size_string, hipMemcpyHostToDevice); hipMemcpy(d_b, counts, size_int, hipMemcpyHostToDevice); hipMemcpy(d_c, merged, size_int, hipMemcpyHostToDevice); // Determine number of threads and blocks needed int threads_num = 512, blocks_num; blocks_num = (int)ceil((float)size/threads_num); // Cluster the strings for the given threshold do { hipLaunchKernelGGL(( populate), dim3(blocks_num), dim3(threads_num), 0, 0, d_b, copy_db, d_c, size,left); hipLaunchKernelGGL(( select), dim3(blocks_num), dim3(threads_num), 0, 0, copy_db, size); hipLaunchKernelGGL(( search), dim3(blocks_num), dim3(threads_num), 0, 0, d_b, d_c, size); hipLaunchKernelGGL(( compare), dim3(blocks_num), dim3(threads_num), 0, 0, d_a, d_b, d_c, size, lenString, threshold); hipLaunchKernelGGL(( anyLeft), dim3(blocks_num), dim3(threads_num), 0, 0, d_c, left, size); hipMemcpy(any_left, left, sizeof(int), hipMemcpyDeviceToHost); } while (*any_left == 0); // Copy results back from device to host hipMemcpy(strings, d_a, size_string, hipMemcpyDeviceToHost); hipMemcpy(counts, d_b, size_int, hipMemcpyDeviceToHost); hipMemcpy(merged, d_c, size_int, hipMemcpyDeviceToHost); int counter = 0; FILE *output = fopen("output2.txt", "w+"); for(i = 0; i < size; i++) { strncpy(copy, &strings[i*lenString], lenString); fprintf(output, "%s %d\n", copy, counts[i]); if (merged[i] == 2) counter++; } fclose(output); printf("%d\n", counter); hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(copy_db); hipFree(left); free(strings); free(counts); free(merged); free(any_left); }
4220913275bfa44f82dd608354ecce8bfbb59c50.cu
#include <stdio.h> #include <stdlib.h> #include <cmath> __device__ int position; //index of the largest value __device__ int largest; //value of the largest value int lenString = 593; int maxNumStrings = 1000000; int threshold = 2; // Checks if there are any unmerged tuples (Parallelized) __global__ void anyLeft(int *d_c, int *remaining, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if((d_c[my_id] == 0) && (my_id < size)) { *remaining = 0; } } // Searches for the index of the largest count (Parallelized) __global__ void search(int *d_b, int *d_c, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if((d_c[my_id] == 0) && (d_b[my_id] == largest) && (my_id < size)) { position = my_id; } } // Populates copy_db such that the counts for merged tuple is 0 // but count for unmerged tuples is unchanged (Parallelized) __global__ void populate (int *d_b, int *copy_db, int *d_c, int size, int *left) { int n = 0; *left = 1; // reinitalized to false to check if all strings are merged int my_id = blockDim.x * blockIdx.x + threadIdx.x; if (my_id < size) { n = abs((bool)d_c[my_id] - 1); copy_db[my_id] = d_b[my_id] * n; } } // Reduction-type tree implementation to find largest count (Parallelized) __device__ void cuda_select(int *db, int size) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if(my_id < size) { if(db[2 * my_id] > db[2 * my_id + 1]) db[my_id] = db[2 * my_id]; else db[my_id] = db[2 * my_id + 1]; } } // Loops cuda_select function until largest value is at index 0 __global__ void select(int *db, int size) { int height = (int)ceil(log2((double)size)); int i = 0; for(i = 0; i < height; i++) { size = (int)ceil((double) size/2); cuda_select(db, size); } largest = db[0]; } // Compares target string to all other unmerged strings with lesser count __global__ void compare(char *d_a, int *d_b, int *d_c, int size, int lenString, int threshold) { int my_id = blockDim.x * blockIdx.x + threadIdx.x; if (my_id == position) d_c[my_id] = 2; if ((my_id < size) && (d_c[my_id] == 0) && (my_id != position)) { int x, diffs = 0; for (x = 0; x < lenString; x++) { diffs += (bool)(d_a[(lenString*position)+x]^d_a[(my_id*lenString)+x]); if (diffs > threshold) break; } if (diffs <= threshold) { d_b[position] += d_b[my_id]; d_c[my_id] = 1; } } } int main(int argc, char** argv) { char *strings, *d_a; // host and device copy of strings int *counts, *d_b; // host and device copy of counts int *merged, *d_c; // host and device copy of bools int *copy_db; // device copy of counts (counts of merged is 0) char copy[lenString+1]; // intermediate variable to load strings into array from file int numbers; // intermediate variable to load counts into array from file int *any_left, *left; // host and device copies to check if all tuples are merged int size = 0; // keeps track of number of tuples in the file int i = 0; // loop variable int size_string = maxNumStrings*sizeof(char)*(lenString+1); int size_int = maxNumStrings*sizeof(int); // Open the file FILE *fp; fp = fopen("/cluster/home/charliep/courses/cs360/single-linkage-clustering/Iceland2014.trim.contigs.good.unique.good.filter.unique.count.fasta", "r"); // Allocate space for arrays on the host if (!(strings = (char *)malloc(size_string))) { fprintf(stderr, "malloc() FAILED (Block)\n"); exit(0); } if (!(counts = (int*)malloc(size_int))) { fprintf(stderr, "malloc() FAILED (Block)\n"); exit(0); } if (!(merged = (int*)malloc(size_int))) { fprintf(stderr, "malloc() FAILED (Block)\n"); exit(0); } any_left = (int *)malloc(sizeof(int)); // Set the values 
of global variables on the device cudaMemset(&position, 0, sizeof(int)); cudaMemset(&largest, 0, sizeof(int)); // Load strings and counts into array while(fscanf(fp, "%s %d", copy, &numbers) != EOF && size < 1000){ strcpy(&strings[i], copy); counts[size] = numbers; i = i + lenString; size++; } // Close file fclose(fp); // Allocate space for arrays on the device cudaMalloc(&d_a, size_string); cudaMalloc(&d_b, size_int); cudaMalloc(&d_c, size_int); cudaMalloc(&copy_db, size_int); cudaMalloc(&left, sizeof(int)); // Copy arrays from host to device cudaMemcpy(d_a, strings, size_string, cudaMemcpyHostToDevice); cudaMemcpy(d_b, counts, size_int, cudaMemcpyHostToDevice); cudaMemcpy(d_c, merged, size_int, cudaMemcpyHostToDevice); // Determine number of threads and blocks needed int threads_num = 512, blocks_num; blocks_num = (int)ceil((float)size/threads_num); // Cluster the strings for the given threshold do { populate<<<blocks_num, threads_num>>>(d_b, copy_db, d_c, size,left); select<<<blocks_num, threads_num>>>(copy_db, size); search<<<blocks_num, threads_num>>>(d_b, d_c, size); compare<<<blocks_num, threads_num>>>(d_a, d_b, d_c, size, lenString, threshold); anyLeft<<<blocks_num, threads_num>>>(d_c, left, size); cudaMemcpy(any_left, left, sizeof(int), cudaMemcpyDeviceToHost); } while (*any_left == 0); // Copy results back from device to host cudaMemcpy(strings, d_a, size_string, cudaMemcpyDeviceToHost); cudaMemcpy(counts, d_b, size_int, cudaMemcpyDeviceToHost); cudaMemcpy(merged, d_c, size_int, cudaMemcpyDeviceToHost); int counter = 0; FILE *output = fopen("output2.txt", "w+"); for(i = 0; i < size; i++) { strncpy(copy, &strings[i*lenString], lenString); fprintf(output, "%s %d\n", copy, counts[i]); if (merged[i] == 2) counter++; } fclose(output); printf("%d\n", counter); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(copy_db); cudaFree(left); free(strings); free(counts); free(merged); free(any_left); }
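The compare kernel above folds a string into the current highest-count string when at most threshold characters differ; (bool)(a ^ b) is 1 exactly when two characters differ, and the loop breaks as soon as the budget is exceeded. A host-side sketch of the same early-exit mismatch count (the helper name is hypothetical):

// Hypothetical host mirror of the per-pair test inside the compare kernel above.
static int within_threshold(const char *a, const char *b, int len, int threshold) {
    int diffs = 0;
    for (int x = 0; x < len; x++) {
        diffs += (a[x] != b[x]);                 // same as (bool)(a[x] ^ b[x]) in the kernel
        if (diffs > threshold) return 0;         // early exit, like the kernel's break
    }
    return 1;
}
// e.g. within_threshold("ACGTACGT", "ACGAACGA", 8, 2) returns 1 (two mismatches)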
e22301bcce2429f313672e7ff839b757139bfc1b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/topk_impl.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/topk_lib.cuh" #include <limits> #include <algorithm> const int kMaxQueue = 128; #define TOPK_HELPER(BLOCK, NUM_WARP_Q, NUM_THREAD_Q, IS_DESCEND) \ do { \ hipLaunchKernelGGL(( TopKBlock<T, S, NUM_WARP_Q, NUM_THREAD_Q, BLOCK, IS_DESCEND>) \ , dim3(block_num_limit), dim3(BLOCK), 0, stream, outer_size, inner_size, input, output, output_index, k_cut, init_K); \ } while (0) #define LEFT_INSERT_THREAD_QUEUE(_k, _v) \ do { \ if (is_descend ? CmpKV<T, S>::gt(_k, _v, (*ceil_K), (*ceil_V)) : CmpKV<T, S>::lt(_k, _v, (*ceil_K), (*ceil_V))) \ break; \ if (is_descend ? CmpKV<T, S>::gt(_k, _v, warp_K_top, warp_V_top) \ : CmpKV<T, S>::lt(_k, _v, warp_K_top, warp_V_top)) { \ { \ _Pragma("unroll") for (int i = thread_queue - 1; i > 0; --i) { \ threadK[i] = threadK[i - 1]; \ threadV[i] = threadV[i - 1]; \ } \ } \ threadK[0] = _k; \ threadV[0] = _v; \ ++num_vals; \ } \ } while (0) template <typename T, typename S, int warp_queue, int thread_queue, int threads_per_block, bool is_descend> inline __device__ void TopKInBuffer(T *shared_K, S *shared_V, int *watermark, T *ceil_K, S *ceil_V, int laneId) { constexpr int kNumWarps = threads_per_block / kWarpSize; // kNumWarps is 1024/32=32 // find last_K, which is max of last element of warp queue T last_K = shared_K[laneId * warp_queue + warp_queue - 1]; S last_V = shared_V[laneId * warp_queue + warp_queue - 1]; __syncwarp(); for (int offset = kNumWarps / 2; offset > 0; offset /= 2) { // kNumWarps is 32 if block size is 1024 T other_K = __shfl_down_sync(0xffffffff, last_K, offset); S other_V = __shfl_down_sync(0xffffffff, last_V, offset); bool is_greater = CmpKV<T, S>::gt(other_K, other_V, last_K, last_V); ConditionalAssign(is_greater, &last_K, other_K); ConditionalAssign(is_greater, &last_V, other_V); } __syncwarp(); if (laneId == 0) { *ceil_K = last_K; *ceil_V = last_V; } __syncwarp(); // calculate index cut by last_K int L = 0; int R = warp_queue; while (L < R) { int m = (L + R) / 2; CmpKV<T, S>::gt(shared_K[laneId * warp_queue + m], shared_V[laneId * warp_queue + m], (*ceil_K), (*ceil_V)) ? 
L = m + 1 : R = m; } __syncwarp(); // merge top number which value is greater than last_K for (int offset = kNumWarps / 2; offset > 0; offset /= 2) { R += __shfl_down_sync(0xffffffff, R, offset); } __syncwarp(); if (laneId == 0) { watermark[0] = R; } __syncwarp(); } template <typename T, typename S, int warp_queue, int thread_queue, int threads_per_block, bool is_descend> inline __device__ void TopKStep(const int &outer_size, const int &inner_size, const T *input, T *output, S *output_index, S k_cut, const T &init_K, const int &outer_id, T *shared_K, S *shared_V, int *watermark, T *threadK, S *threadV, T *ceil_K, S *ceil_V, S *k_prime) { constexpr int kNumWarps = threads_per_block / kWarpSize; constexpr S init_V = static_cast<S>(-1); T *warp_K; S *warp_V; T warp_K_top = init_K; S warp_V_top = init_V; int k_minus_1 = (k_cut <= kMaxQueue ? k_cut - 1 : kMaxQueue - 1); int num_vals = 0; int limit = (inner_size / kWarpSize) * kWarpSize; _Pragma("unroll") for (int i = 0; i < thread_queue; ++i) { threadK[i] = init_K; threadV[i] = init_V; } int laneId = GetLaneId(); int warpId = threadIdx.x / kWarpSize; // 0,1,2 or 3 warp_K = shared_K + warpId * warp_queue; warp_V = shared_V + warpId * warp_queue; for (int i = laneId; i < warp_queue; i += kWarpSize) { warp_K[i] = init_K; warp_V[i] = init_V; } __syncwarp(); int i = threadIdx.x; for (; i < limit; i += threads_per_block) { LEFT_INSERT_THREAD_QUEUE((input[outer_id * inner_size + i]), (outer_id * inner_size + i)); bool needSort = (num_vals == thread_queue); needSort = __any_sync(0xffffffff, needSort); if (!needSort) continue; MergeWarpQueue<T, S, warp_queue, thread_queue, is_descend>(threadK, threadV, warp_K, warp_V); num_vals = 0; _Pragma("unroll") for (int i = 0; i < thread_queue; ++i) { threadK[i] = init_K; threadV[i] = init_V; } warp_K_top = warp_K[k_minus_1]; warp_V_top = warp_V[k_minus_1]; __syncwarp(); } if (i < inner_size) { LEFT_INSERT_THREAD_QUEUE((input[outer_id * inner_size + i]), (outer_id * inner_size + i)); } MergeWarpQueue<T, S, warp_queue, thread_queue, is_descend>(threadK, threadV, warp_K, warp_V); __syncthreads(); if (k_cut > kMaxQueue && warpId == 0) { TopKInBuffer<T, S, warp_queue, thread_queue, threads_per_block, is_descend>(shared_K, shared_V, watermark, ceil_K, ceil_V, laneId); } __syncthreads(); SortBlockWide<kNumWarps, threads_per_block, T, S, warp_queue, is_descend>(shared_K, shared_V); S k_step = (*k_prime) + watermark[0] <= k_cut ? 
watermark[0] : k_cut - (*k_prime); for (int i = threadIdx.x; i < k_step; i += blockDim.x) { output[outer_id * k_cut + (*k_prime) + i] = shared_K[i]; output_index[outer_id * k_cut + (*k_prime) + i] = shared_V[i] % inner_size; } *k_prime += k_step; __syncthreads(); } template <typename T, typename S, int warp_queue, int thread_queue, int threads_per_block, bool is_descend> __global__ void TopKBlock(int outer_size, int inner_size, const T *input, T *output, S *output_index, S k_cut, const T init_K) { constexpr int kNumWarps = threads_per_block / kWarpSize; __shared__ T shared_K[kNumWarps * warp_queue]; __shared__ S shared_V[kNumWarps * warp_queue]; __shared__ int watermark[1]; __shared__ T ceil_K; __shared__ S ceil_V; T threadK[thread_queue]; // NOLINT S threadV[thread_queue]; // NOLINT for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < blockDim.x * outer_size; t_idx += blockDim.x * gridDim.x) { S k_prime = 0; int outer_id = t_idx / blockDim.x; ceil_K = -init_K; ceil_V = -1; watermark[0] = k_cut; do { TopKStep<T, S, warp_queue, thread_queue, threads_per_block, is_descend>( outer_size, inner_size, input, output, output_index, k_cut, init_K, outer_id, shared_K, shared_V, watermark, threadK, threadV, &ceil_K, &ceil_V, &k_prime); } while (k_prime < k_cut); } } template <typename T, typename S> void FastTopK(const int outer_size, const int inner_size, const T *input, S k_cut, T *output, S *output_index, const T init_K, hipStream_t stream) { int block_num_limit = outer_size < 128 ? outer_size : 128; if (k_cut > inner_size) k_cut = inner_size; if (k_cut <= 32) { // num-threads-of-block, warp-queue-size, thread-queue-size TOPK_HELPER(256, 32, 2, true); } else if (k_cut <= 64) { TOPK_HELPER(256, 64, 3, true); } else if (k_cut <= 128) { TOPK_HELPER(256, 128, 3, true); } else { TOPK_HELPER(1024, 128, 3, true); } } template void FastTopK(const int outer_size, const int inner_size, const half *input, int k_cut, half *output, int *output_index, const half init_K, hipStream_t stream); template void FastTopK(const int outer_size, const int inner_size, const float *input, int k_cut, float *output, int *output_index, const float init_K, hipStream_t stream);
e22301bcce2429f313672e7ff839b757139bfc1b.cu
/** * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/topk_impl.cuh" #include "backend/kernel_compiler/gpu/cuda_impl/topk_lib.cuh" #include <limits> #include <algorithm> const int kMaxQueue = 128; #define TOPK_HELPER(BLOCK, NUM_WARP_Q, NUM_THREAD_Q, IS_DESCEND) \ do { \ TopKBlock<T, S, NUM_WARP_Q, NUM_THREAD_Q, BLOCK, IS_DESCEND> \ <<<block_num_limit, BLOCK, 0, stream>>>(outer_size, inner_size, input, output, output_index, k_cut, init_K); \ } while (0) #define LEFT_INSERT_THREAD_QUEUE(_k, _v) \ do { \ if (is_descend ? CmpKV<T, S>::gt(_k, _v, (*ceil_K), (*ceil_V)) : CmpKV<T, S>::lt(_k, _v, (*ceil_K), (*ceil_V))) \ break; \ if (is_descend ? CmpKV<T, S>::gt(_k, _v, warp_K_top, warp_V_top) \ : CmpKV<T, S>::lt(_k, _v, warp_K_top, warp_V_top)) { \ { \ _Pragma("unroll") for (int i = thread_queue - 1; i > 0; --i) { \ threadK[i] = threadK[i - 1]; \ threadV[i] = threadV[i - 1]; \ } \ } \ threadK[0] = _k; \ threadV[0] = _v; \ ++num_vals; \ } \ } while (0) template <typename T, typename S, int warp_queue, int thread_queue, int threads_per_block, bool is_descend> inline __device__ void TopKInBuffer(T *shared_K, S *shared_V, int *watermark, T *ceil_K, S *ceil_V, int laneId) { constexpr int kNumWarps = threads_per_block / kWarpSize; // kNumWarps is 1024/32=32 // find last_K, which is max of last element of warp queue T last_K = shared_K[laneId * warp_queue + warp_queue - 1]; S last_V = shared_V[laneId * warp_queue + warp_queue - 1]; __syncwarp(); for (int offset = kNumWarps / 2; offset > 0; offset /= 2) { // kNumWarps is 32 if block size is 1024 T other_K = __shfl_down_sync(0xffffffff, last_K, offset); S other_V = __shfl_down_sync(0xffffffff, last_V, offset); bool is_greater = CmpKV<T, S>::gt(other_K, other_V, last_K, last_V); ConditionalAssign(is_greater, &last_K, other_K); ConditionalAssign(is_greater, &last_V, other_V); } __syncwarp(); if (laneId == 0) { *ceil_K = last_K; *ceil_V = last_V; } __syncwarp(); // calculate index cut by last_K int L = 0; int R = warp_queue; while (L < R) { int m = (L + R) / 2; CmpKV<T, S>::gt(shared_K[laneId * warp_queue + m], shared_V[laneId * warp_queue + m], (*ceil_K), (*ceil_V)) ? 
L = m + 1 : R = m; } __syncwarp(); // merge top number which value is greater than last_K for (int offset = kNumWarps / 2; offset > 0; offset /= 2) { R += __shfl_down_sync(0xffffffff, R, offset); } __syncwarp(); if (laneId == 0) { watermark[0] = R; } __syncwarp(); } template <typename T, typename S, int warp_queue, int thread_queue, int threads_per_block, bool is_descend> inline __device__ void TopKStep(const int &outer_size, const int &inner_size, const T *input, T *output, S *output_index, S k_cut, const T &init_K, const int &outer_id, T *shared_K, S *shared_V, int *watermark, T *threadK, S *threadV, T *ceil_K, S *ceil_V, S *k_prime) { constexpr int kNumWarps = threads_per_block / kWarpSize; constexpr S init_V = static_cast<S>(-1); T *warp_K; S *warp_V; T warp_K_top = init_K; S warp_V_top = init_V; int k_minus_1 = (k_cut <= kMaxQueue ? k_cut - 1 : kMaxQueue - 1); int num_vals = 0; int limit = (inner_size / kWarpSize) * kWarpSize; _Pragma("unroll") for (int i = 0; i < thread_queue; ++i) { threadK[i] = init_K; threadV[i] = init_V; } int laneId = GetLaneId(); int warpId = threadIdx.x / kWarpSize; // 0,1,2 or 3 warp_K = shared_K + warpId * warp_queue; warp_V = shared_V + warpId * warp_queue; for (int i = laneId; i < warp_queue; i += kWarpSize) { warp_K[i] = init_K; warp_V[i] = init_V; } __syncwarp(); int i = threadIdx.x; for (; i < limit; i += threads_per_block) { LEFT_INSERT_THREAD_QUEUE((input[outer_id * inner_size + i]), (outer_id * inner_size + i)); bool needSort = (num_vals == thread_queue); needSort = __any_sync(0xffffffff, needSort); if (!needSort) continue; MergeWarpQueue<T, S, warp_queue, thread_queue, is_descend>(threadK, threadV, warp_K, warp_V); num_vals = 0; _Pragma("unroll") for (int i = 0; i < thread_queue; ++i) { threadK[i] = init_K; threadV[i] = init_V; } warp_K_top = warp_K[k_minus_1]; warp_V_top = warp_V[k_minus_1]; __syncwarp(); } if (i < inner_size) { LEFT_INSERT_THREAD_QUEUE((input[outer_id * inner_size + i]), (outer_id * inner_size + i)); } MergeWarpQueue<T, S, warp_queue, thread_queue, is_descend>(threadK, threadV, warp_K, warp_V); __syncthreads(); if (k_cut > kMaxQueue && warpId == 0) { TopKInBuffer<T, S, warp_queue, thread_queue, threads_per_block, is_descend>(shared_K, shared_V, watermark, ceil_K, ceil_V, laneId); } __syncthreads(); SortBlockWide<kNumWarps, threads_per_block, T, S, warp_queue, is_descend>(shared_K, shared_V); S k_step = (*k_prime) + watermark[0] <= k_cut ? 
watermark[0] : k_cut - (*k_prime); for (int i = threadIdx.x; i < k_step; i += blockDim.x) { output[outer_id * k_cut + (*k_prime) + i] = shared_K[i]; output_index[outer_id * k_cut + (*k_prime) + i] = shared_V[i] % inner_size; } *k_prime += k_step; __syncthreads(); } template <typename T, typename S, int warp_queue, int thread_queue, int threads_per_block, bool is_descend> __global__ void TopKBlock(int outer_size, int inner_size, const T *input, T *output, S *output_index, S k_cut, const T init_K) { constexpr int kNumWarps = threads_per_block / kWarpSize; __shared__ T shared_K[kNumWarps * warp_queue]; __shared__ S shared_V[kNumWarps * warp_queue]; __shared__ int watermark[1]; __shared__ T ceil_K; __shared__ S ceil_V; T threadK[thread_queue]; // NOLINT S threadV[thread_queue]; // NOLINT for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < blockDim.x * outer_size; t_idx += blockDim.x * gridDim.x) { S k_prime = 0; int outer_id = t_idx / blockDim.x; ceil_K = -init_K; ceil_V = -1; watermark[0] = k_cut; do { TopKStep<T, S, warp_queue, thread_queue, threads_per_block, is_descend>( outer_size, inner_size, input, output, output_index, k_cut, init_K, outer_id, shared_K, shared_V, watermark, threadK, threadV, &ceil_K, &ceil_V, &k_prime); } while (k_prime < k_cut); } } template <typename T, typename S> void FastTopK(const int outer_size, const int inner_size, const T *input, S k_cut, T *output, S *output_index, const T init_K, cudaStream_t stream) { int block_num_limit = outer_size < 128 ? outer_size : 128; if (k_cut > inner_size) k_cut = inner_size; if (k_cut <= 32) { // num-threads-of-block, warp-queue-size, thread-queue-size TOPK_HELPER(256, 32, 2, true); } else if (k_cut <= 64) { TOPK_HELPER(256, 64, 3, true); } else if (k_cut <= 128) { TOPK_HELPER(256, 128, 3, true); } else { TOPK_HELPER(1024, 128, 3, true); } } template void FastTopK(const int outer_size, const int inner_size, const half *input, int k_cut, half *output, int *output_index, const half init_K, cudaStream_t stream); template void FastTopK(const int outer_size, const int inner_size, const float *input, int k_cut, float *output, int *output_index, const float init_K, cudaStream_t stream);
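TopKInBuffer above reduces one candidate per lane with __shfl_down_sync, halving the offset each step so that lane 0 ends up holding the warp-wide result. A minimal version of the same idiom for a plain maximum over one warp; the kernel name and launch are illustrative, not part of the file:

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical warp-max kernel using the same shuffle-reduction idiom as TopKInBuffer.
__global__ void warpMaxKernel(const int *in, int *out) {
    int v = in[threadIdx.x];                              // one value per lane of a single warp
    for (int offset = 16; offset > 0; offset >>= 1) {
        int other = __shfl_down_sync(0xffffffff, v, offset);
        if (other > v) v = other;
    }
    if (threadIdx.x == 0) *out = v;                       // lane 0 now holds the maximum of all 32 values
}

int main(void) {
    int h_in[32], h_out = 0, *d_in, *d_out;
    for (int i = 0; i < 32; ++i) h_in[i] = (i * 37) % 101;
    cudaMalloc(&d_in, 32 * sizeof(int)); cudaMalloc(&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in, 32 * sizeof(int), cudaMemcpyHostToDevice);
    warpMaxKernel<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("warp max = %d\n", h_out);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}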
6ba6e881ade29204e5593a0a012d816b4805e076.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" #include <vector> // Shared memory is 50 Kbyte per SM and an int is 4 Bytes so // if the TILE is greater than MAXTILE the array will not fit // in shared memory static constexpr int MAXTILE_Shared { 8192}; static constexpr int MAXTILE { ( MAXTILE_Shared < 2 * StreamCompaction::Efficient::blockSize) ? MAXTILE_Shared : 2 * StreamCompaction::Efficient::blockSize}; //static constexpr int devStart{ 0 }; //static constexpr int scanStart{ 0 }; static constexpr bool printDebug{ false }; void printArray2(int n, int *a, bool abridged = false) { printf(" [ "); for (int i = 0; i < n; i++) { if (abridged && i + 2 == 15 && n > 16) { i = n - 2; printf("... "); } printf("%3d ", a[i]); } printf("]\n"); } namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // initialize the arrays on the GPU // returns the addresses of the pointers on the GPU // dev_idata is a pointer to the address of the dev_idata array that // gets updated here // initialize dev_idata has the first // elements copied and the remainder to make the stream 2^n // are set to 0. The first input is the size of the arrays // to allocate and the second input is the size of the array to transfer. // N the maximum size of the allocated array. n is the size of the data array // N is one more than the multiple of 2 greater or equal to n, // in dev_idata, and then the elements are copied inte dev_idata. void initScan(int N, int n, const int *idata, int ** dev_idata) { int size{ sizeof(int) }; hipMalloc(reinterpret_cast<void**>(dev_idata), N * size); checkCUDAError("Allocating Scan Buffer Efficient Error"); hipMemcpy(static_cast<void*>(*dev_idata), idata, n *size, hipMemcpyHostToDevice); hipMemset(static_cast<void*>(*dev_idata + n), 0, (N - n) * size); // no need to initialize the odata because the loop does that each time checkCUDAError("Initialize and Copy data to target Error"); hipDeviceSynchronize(); } void initScanSum(int numblocks, int ** scan_sum) { int size {sizeof(int)}; hipMalloc(reinterpret_cast<void**>(scan_sum), numblocks * size); checkCUDAError("Allocating Scan Efficient shared Error"); } // transfer scan data back to host void transferIntToHost(int N, int * odata, int * dev_odata) { hipMemcpy(odata, dev_odata, N * sizeof(int), hipMemcpyDeviceToHost); } void printDevArray(int n, int start, int* devA, bool abridged = false) { if (start >= n || !printDebug) { return; } int * copy = new int[n - start]; transferIntToHost(n - start, copy, devA + start); printf("sIdx %d: ", start); printArray2(n - start, copy, abridged); delete[] copy; } // end the scan on the device. void endScan(int * dev_idata) { hipFree(dev_idata); } void freeScanShared(const SharedScan scan){ hipFree(scan.dev_idata); hipFree(scan.scan_sum); } void freeCompaction(const CompactSupport compactSupport) { freeScanShared(compactSupport.scan); hipFree(compactSupport.bool_data); } // Kernel Reduction and downsweep in Shared Memory. TILE is the width of the TILE // and the max thread is TILE/2. 
__global__ void kernScanShared(int TILE, int * dev_idata, int * dev_Tilesum) { extern __shared__ int xy[]; // load data into shared memory // each thread loads two data points; int maxThreads = TILE >> 1; if (threadIdx.x >= maxThreads) { return; } // copy to shared memory 1 thread per two data points int Stride {2}; int Sharedindex = threadIdx.x * Stride; int devindex = Sharedindex + TILE * blockIdx.x; xy[Sharedindex] = dev_idata[devindex]; xy[Sharedindex + 1] = dev_idata[devindex + 1]; __syncthreads(); // do the parallel reduction int maxRed{ maxThreads }; for ( ; Stride <= TILE; Stride <<= 1, maxRed >>= 1) { int priorStride{ Stride >> 1 }; if (threadIdx.x < maxRed) { int rindex = (threadIdx.x + 1) * Stride - 1; xy[rindex] += xy[rindex - priorStride]; } __syncthreads(); //if (rindex < TILE){} } const int startOffset { TILE - 1}; // have one thread in the block copy the last element; // to scan and set that element to zero if ( threadIdx.x == 0) { *(dev_Tilesum + blockIdx.x) = xy[startOffset]; xy[startOffset] = 0; } __syncthreads(); // Now do the Downsweep to Sum elements // Stride starts at TILE maxRed = 1; for (Stride = TILE; Stride > 1; Stride >>= 1, maxRed <<= 1){ if (threadIdx.x < maxRed) { int right = -Stride * threadIdx.x + startOffset; int separation = Stride >> 1; int left = right - separation; int current = xy[right]; xy[right] += xy[left]; xy[left] = current; } //if (right >= 0) {} __syncthreads(); } // now copy back; dev_idata[devindex] = xy[Sharedindex]; dev_idata[devindex + 1] = xy[Sharedindex + 1]; __syncthreads(); } __global__ void kernAddSumToTile(int Tile, int* dev_idata, int* ScanSum) { extern __shared__ int xy[]; int maxThreads = Tile >> 1; if (threadIdx.x >= maxThreads) { return; } int Stride {2}; int Sharedindex = threadIdx.x * Stride; int devindex = Sharedindex + Tile * blockIdx.x; xy[Sharedindex] = dev_idata[devindex]; xy[Sharedindex + 1] = dev_idata[devindex + 1]; int sum { *(ScanSum + blockIdx.x)}; xy[Sharedindex] += sum; xy[Sharedindex + 1] += sum; // now copy back; dev_idata[devindex] = xy[Sharedindex]; dev_idata[devindex + 1] = xy[Sharedindex + 1]; } // kernParallelReduction uses contiguous threads to do the parallel reduction // There is one thread for every two elements __global__ void kernParallelReduction(int N, int Stride, int maxThreads, int * dev_idata) { int thread = threadIdx.x + blockIdx.x * blockDim.x; if (thread >= maxThreads) { return; } int priorStride{ Stride >> 1 }; int index = (thread + 1) * Stride - 1; if (index < N) { dev_idata[index] += dev_idata[index - priorStride]; } } // Downsweep uses contiguous threads to sweep down and add the intermediate // results to the partial sums already computed // There is one thread for every two elements. Here there is a for loop // that changes the stride. Contiguous allows the first threads to do all // the work and later warps will all be 0. __global__ void kernDownSweep(int N, int stride, int maxThreads, int * dev_idata) { int thread = threadIdx.x + blockIdx.x * blockDim.x; if (thread >= maxThreads) { return; } // have one thread set the last element to 0; int startOffset{ N - 1 }; int right = -stride * thread + startOffset; if (right >= 0) { int separation = stride >> 1; int left = right - separation; int current = dev_idata[right]; dev_idata[right] += dev_idata[left]; dev_idata[left] = current; } } inline int gridSize(int threads) { return (threads + blockSize - 1) / blockSize; } /* Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void efficientScan(int N, int d, int * dev_idata) { int maxThreads{ N >> 1 }; for (int stride = { 2 }; stride <= N; stride *= 2) { int grids{ gridSize(maxThreads) }; dim3 fullBlocksPerGrid(grids); kernParallelReduction << <fullBlocksPerGrid, blockSize >> > (N, stride, maxThreads, dev_idata); maxThreads >>= 1; } hipMemset((dev_idata + N - 1), 0, sizeof(int)); maxThreads = 1; for (int stride = { N }; stride > 1; stride >>= 1) { int grids{ gridSize(maxThreads) }; dim3 fullBlocksPerGrid(grids); // printf(" %d %d %d\n", grids, maxThreads, stride); kernDownSweep << <fullBlocksPerGrid, blockSize >> >(N, stride, maxThreads, dev_idata); maxThreads <<= 1; } hipDeviceSynchronize(); } // blocksNeeded calculates the number of scan blocks needed and // the Tile Size. N -- length of the original list that needs to // be a power of 2. Tile is the Tile size or block size that also is a // power of 2. dtile is Tile = 2^dtile. The Tile size will change // only if it is Tile > N. It produces 1 if N = 2^4 and T = 2^4 or if // N = 2^3 and T = 2^4; log2N will be greater than 0. // updates log2N and log2Tile. int blocksNeeded(int& log2N, int& log2Tile) { log2N -= log2Tile; if (log2N >= 0) { return 1 << log2N; } else {// set log2Tile to be the original log2N log2Tile += log2N; return 1; } } // int blocksNeeded(int N, int& Tile, int dtile){ // if ( Tile > N) { // Tile = N; // return 1; // } // return N >> dtile; // } // Totalblocks Needed calculates the total tiles needed for // all the scan arrays; log2N is log2(N) log2Tile is log2(Tile); int TotalBlocksNeeded(int log2N, int log2Tile) { int total{0}; log2N -= log2Tile; for (; log2N >= 0; log2N -= log2Tile) { total += 1 << log2N; } if (log2N > -log2Tile) { total += 1; } return total; } // this should also produce the total based on // the sum of a power series in 2^(log2Tile) int TotalBlocksNeeded2(int log2N, int log2Tile) { int quotient {log2N/log2Tile}; int PowerofTile = quotient * log2Tile; int rem = log2N - PowerofTile; int Total { ((1 << PowerofTile) - 1) / ((1 << log2Tile) -1)}; if (rem != 0) { Total *= 1 << rem; Total += 1; } return Total; } // will call itself recursively to produce the Total Sum of idata // Tile is a multiple of 2 N is the size of dev_idata, // 2^(dtile) = Tile, scan_sum is a preallocated array of the correct blocksize void efficientScanShared(int log2N, int log2Tile, int * dev_idata, int* scan_sum, int printoffset) { int n = (1 << log2N); // updates log2N for next iteration and log2Tile in case // this sum is less than the max printDevArray(n, printoffset, dev_idata, true); int numblocks {blocksNeeded(log2N, log2Tile)}; int maxThreads{ 1 << (log2Tile - 1) }; //maxThreads = ::max(maxThreads, 32); int Tile{ 1 << log2Tile }; dim3 numThreads ( maxThreads); dim3 numBLOCKS(numblocks); int size { sizeof(int)}; hipLaunchKernelGGL(( kernScanShared), dim3(numBLOCKS), dim3(numThreads), Tile * size , 0, Tile, dev_idata, scan_sum); checkCUDAError("find Cuda Error"); printDevArray(n, printoffset, dev_idata, true); // no need for scan_sum and total scan is correct if numblocks == 1 if (numblocks == 1) { return; } efficientScanShared( log2N, log2Tile, scan_sum, scan_sum + numblocks, 0); numBLOCKS = dim3(numblocks - 1); hipLaunchKernelGGL(( kernAddSumToTile), dim3(numBLOCKS), dim3(numThreads), Tile * size , 0, Tile, dev_idata + Tile, scan_sum + 1); checkCUDAError("find Cuda Error 2"); printDevArray(n, printoffset, dev_idata, true); //hipDeviceSynchronize(); } // init device arrays necessary to sum N items in shared Memory SharedScan 
initSharedScan(const int n, int tileSize) { // d is the number of scans needed and also the // upper bound for log2 of the number of elements SharedScan shared; shared.log2N = ilog2ceil(n); // int N{ 1 << shared.log2N }; // Tile should be less than or equal to N, TILEMAX, Tile // inputted tileSize = (tileSize == -1) ? ::min(MAXTILE, N): ::min({MAXTILE, N, tileSize}); tileSize = ::max(tileSize, 2); shared.log2T = ilog2(tileSize); // Tile must be a multiple of 2 to divide N so Tile // will actually be less than this // calculate Total scan size int ScanSize { TotalBlocksNeeded(shared.log2N, shared.log2T)}; //if (ScanSize != TotalBlocksNeeded2(shared.log2N, shared.log2T)){ // throw std::runtime_error("blocks needed may not be correct"); //} can check if TotalBlocksNeeded is correct. It passed each time initScanSum(ScanSize, &shared.scan_sum); initScanSum(N, &shared.dev_idata); return shared; } CompactSupport initCompactSupport(const SharedScan scan){ CompactSupport compact { scan, NULL}; int N {1 << compact.scan.log2N}; initScanSum(N, &compact.bool_data); return compact; } // does the scan but now puts void scanShared (int n, int *odata, const int *idata, int Tile) { int * dev_idata; int * scan_sum; // d is the number of scans needed and also the // upper bound for log2 of the number of elements int d{ ilog2ceil(n) }; // int N{ 1 << d }; // Tile should be less than or equal to N, TILEMAX, Tile // inputted Tile = (Tile == -1) ? ::min(MAXTILE, N): ::min({MAXTILE, N, Tile}); Tile = ::max(Tile, 2); int dtile { ilog2(Tile)}; // Tile must be a multiple of 2 to divide N so Tile // will actually be less than this // calculate Total scan size int ScanSize { TotalBlocksNeeded(d, dtile)}; //if (ScanSize != TotalBlocksNeeded2(d, dtile)){ // throw std::runtime_error("blocks needed may not be correct"); //} initScan(N, n, idata, &dev_idata); timer().startGpuTimer(); initScanSum(ScanSize, &scan_sum); efficientScanShared(d, dtile, dev_idata, scan_sum); timer().endGpuTimer(); // only transfer tho first n elements of the // exclusive scan transferIntToHost(n, odata, dev_idata); endScan(dev_idata); endScan(scan_sum); } void scan(int n, int *odata, const int *idata) { int * dev_idata; // d is the number of scans needed and also the // upper bound for log2 of the number of elements int d{ ilog2ceil(n) }; // int N{ 1 << d }; initScan(N, n, idata, &dev_idata); timer().startGpuTimer(); efficientScan(N, d, dev_idata); timer().endGpuTimer(); // only transfer tho first n elements of the // exclusive scan transferIntToHost(n, odata, dev_idata); endScan(dev_idata); } void initCompact(int N, int n, const int *idata, int ** dev_idata, int **dev_booldata, int ** dev_indices, int **dev_odata) { int size{ sizeof(int) }; hipMalloc(reinterpret_cast<void**> (dev_booldata), N * size); hipMalloc(reinterpret_cast<void**> (dev_idata), N * size); hipMalloc(reinterpret_cast<void**> (dev_indices), N * size); hipMalloc(reinterpret_cast<void**> (dev_odata), N * size); checkCUDAError("Allocating Compaction Scan Error"); hipMemset(*dev_idata, 0, N * size); hipMemcpy(*dev_idata, idata, n *size, hipMemcpyHostToDevice); // no need to initialize the odata because the loop does that each time checkCUDAError("Initialize and Copy data to target Error"); hipDeviceSynchronize(); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. 
* @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { int * dev_idata; int * dev_booldata; int * dev_indices; int * dev_odata; // d is the number of scans needed and also the // upper bound for log2 of the number of elements int d{ ilog2ceil(n) }; // int N{ 1 << d }; initCompact(N, n, idata, &dev_idata, &dev_booldata, &dev_indices, &dev_odata); timer().startGpuTimer(); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(N, dev_booldata, dev_idata); hipMemcpy(dev_indices, dev_booldata, N * sizeof(int), hipMemcpyDeviceToDevice); efficientScan(N, d, dev_indices); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(N, dev_odata, dev_idata, dev_booldata, dev_indices); timer().endGpuTimer(); int lastIndex; transferIntToHost(1, &lastIndex, dev_indices + N - 1); int lastIncluded; transferIntToHost(1, &lastIncluded, dev_booldata + N - 1); std::vector<int> input(n); std::vector<int> bools(n); std::vector<int> indices(n); transferIntToHost(n, input.data(), dev_idata); transferIntToHost(n, bools.data(), dev_booldata); transferIntToHost(n, indices.data(), dev_indices); printArray2(n, input.data(), true); printArray2(n, bools.data(), true); printArray2(n, indices.data(), true); n = lastIncluded + lastIndex; transferIntToHost(n, odata, dev_odata); printArray2(n, odata, true); endScan(dev_odata); endScan(dev_idata); endScan(dev_indices); endScan(dev_booldata); return n; } } }
6ba6e881ade29204e5593a0a012d816b4805e076.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" #include <vector> // Shared memory is 50 Kbyte per SM and an int is 4 Bytes so // if the TILE is greater than MAXTILE the array will not fit // in shared memory static constexpr int MAXTILE_Shared { 8192}; static constexpr int MAXTILE { ( MAXTILE_Shared < 2 * StreamCompaction::Efficient::blockSize) ? MAXTILE_Shared : 2 * StreamCompaction::Efficient::blockSize}; //static constexpr int devStart{ 0 }; //static constexpr int scanStart{ 0 }; static constexpr bool printDebug{ false }; void printArray2(int n, int *a, bool abridged = false) { printf(" [ "); for (int i = 0; i < n; i++) { if (abridged && i + 2 == 15 && n > 16) { i = n - 2; printf("... "); } printf("%3d ", a[i]); } printf("]\n"); } namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // initialize the arrays on the GPU // returns the addresses of the pointers on the GPU // dev_idata is a pointer to the address of the dev_idata array that // gets updated here // initialize dev_idata has the first // elements copied and the remainder to make the stream 2^n // are set to 0. The first input is the size of the arrays // to allocate and the second input is the size of the array to transfer. // N the maximum size of the allocated array. n is the size of the data array // N is one more than the multiple of 2 greater or equal to n, // in dev_idata, and then the elements are copied inte dev_idata. void initScan(int N, int n, const int *idata, int ** dev_idata) { int size{ sizeof(int) }; cudaMalloc(reinterpret_cast<void**>(dev_idata), N * size); checkCUDAError("Allocating Scan Buffer Efficient Error"); cudaMemcpy(static_cast<void*>(*dev_idata), idata, n *size, cudaMemcpyHostToDevice); cudaMemset(static_cast<void*>(*dev_idata + n), 0, (N - n) * size); // no need to initialize the odata because the loop does that each time checkCUDAError("Initialize and Copy data to target Error"); cudaThreadSynchronize(); } void initScanSum(int numblocks, int ** scan_sum) { int size {sizeof(int)}; cudaMalloc(reinterpret_cast<void**>(scan_sum), numblocks * size); checkCUDAError("Allocating Scan Efficient shared Error"); } // transfer scan data back to host void transferIntToHost(int N, int * odata, int * dev_odata) { cudaMemcpy(odata, dev_odata, N * sizeof(int), cudaMemcpyDeviceToHost); } void printDevArray(int n, int start, int* devA, bool abridged = false) { if (start >= n || !printDebug) { return; } int * copy = new int[n - start]; transferIntToHost(n - start, copy, devA + start); printf("sIdx %d: ", start); printArray2(n - start, copy, abridged); delete[] copy; } // end the scan on the device. void endScan(int * dev_idata) { cudaFree(dev_idata); } void freeScanShared(const SharedScan scan){ cudaFree(scan.dev_idata); cudaFree(scan.scan_sum); } void freeCompaction(const CompactSupport compactSupport) { freeScanShared(compactSupport.scan); cudaFree(compactSupport.bool_data); } // Kernel Reduction and downsweep in Shared Memory. TILE is the width of the TILE // and the max thread is TILE/2. 
__global__ void kernScanShared(int TILE, int * dev_idata, int * dev_Tilesum) { extern __shared__ int xy[]; // load data into shared memory // each thread loads two data points; int maxThreads = TILE >> 1; if (threadIdx.x >= maxThreads) { return; } // copy to shared memory 1 thread per two data points int Stride {2}; int Sharedindex = threadIdx.x * Stride; int devindex = Sharedindex + TILE * blockIdx.x; xy[Sharedindex] = dev_idata[devindex]; xy[Sharedindex + 1] = dev_idata[devindex + 1]; __syncthreads(); // do the parallel reduction int maxRed{ maxThreads }; for ( ; Stride <= TILE; Stride <<= 1, maxRed >>= 1) { int priorStride{ Stride >> 1 }; if (threadIdx.x < maxRed) { int rindex = (threadIdx.x + 1) * Stride - 1; xy[rindex] += xy[rindex - priorStride]; } __syncthreads(); //if (rindex < TILE){} } const int startOffset { TILE - 1}; // have one thread in the block copy the last element; // to scan and set that element to zero if ( threadIdx.x == 0) { *(dev_Tilesum + blockIdx.x) = xy[startOffset]; xy[startOffset] = 0; } __syncthreads(); // Now do the Downsweep to Sum elements // Stride starts at TILE maxRed = 1; for (Stride = TILE; Stride > 1; Stride >>= 1, maxRed <<= 1){ if (threadIdx.x < maxRed) { int right = -Stride * threadIdx.x + startOffset; int separation = Stride >> 1; int left = right - separation; int current = xy[right]; xy[right] += xy[left]; xy[left] = current; } //if (right >= 0) {} __syncthreads(); } // now copy back; dev_idata[devindex] = xy[Sharedindex]; dev_idata[devindex + 1] = xy[Sharedindex + 1]; __syncthreads(); } __global__ void kernAddSumToTile(int Tile, int* dev_idata, int* ScanSum) { extern __shared__ int xy[]; int maxThreads = Tile >> 1; if (threadIdx.x >= maxThreads) { return; } int Stride {2}; int Sharedindex = threadIdx.x * Stride; int devindex = Sharedindex + Tile * blockIdx.x; xy[Sharedindex] = dev_idata[devindex]; xy[Sharedindex + 1] = dev_idata[devindex + 1]; int sum { *(ScanSum + blockIdx.x)}; xy[Sharedindex] += sum; xy[Sharedindex + 1] += sum; // now copy back; dev_idata[devindex] = xy[Sharedindex]; dev_idata[devindex + 1] = xy[Sharedindex + 1]; } // kernParallelReduction uses contiguous threads to do the parallel reduction // There is one thread for every two elements __global__ void kernParallelReduction(int N, int Stride, int maxThreads, int * dev_idata) { int thread = threadIdx.x + blockIdx.x * blockDim.x; if (thread >= maxThreads) { return; } int priorStride{ Stride >> 1 }; int index = (thread + 1) * Stride - 1; if (index < N) { dev_idata[index] += dev_idata[index - priorStride]; } } // Downsweep uses contiguous threads to sweep down and add the intermediate // results to the partial sums already computed // There is one thread for every two elements. Here there is a for loop // that changes the stride. Contiguous allows the first threads to do all // the work and later warps will all be 0. __global__ void kernDownSweep(int N, int stride, int maxThreads, int * dev_idata) { int thread = threadIdx.x + blockIdx.x * blockDim.x; if (thread >= maxThreads) { return; } // have one thread set the last element to 0; int startOffset{ N - 1 }; int right = -stride * thread + startOffset; if (right >= 0) { int separation = stride >> 1; int left = right - separation; int current = dev_idata[right]; dev_idata[right] += dev_idata[left]; dev_idata[left] = current; } } inline int gridSize(int threads) { return (threads + blockSize - 1) / blockSize; } /* Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void efficientScan(int N, int d, int * dev_idata) { int maxThreads{ N >> 1 }; for (int stride = { 2 }; stride <= N; stride *= 2) { int grids{ gridSize(maxThreads) }; dim3 fullBlocksPerGrid(grids); kernParallelReduction << <fullBlocksPerGrid, blockSize >> > (N, stride, maxThreads, dev_idata); maxThreads >>= 1; } cudaMemset((dev_idata + N - 1), 0, sizeof(int)); maxThreads = 1; for (int stride = { N }; stride > 1; stride >>= 1) { int grids{ gridSize(maxThreads) }; dim3 fullBlocksPerGrid(grids); // printf(" %d %d %d\n", grids, maxThreads, stride); kernDownSweep << <fullBlocksPerGrid, blockSize >> >(N, stride, maxThreads, dev_idata); maxThreads <<= 1; } cudaThreadSynchronize(); } // blocksNeeded calculates the number of scan blocks needed and // the Tile Size. N -- length of the original list that needs to // be a power of 2. Tile is the Tile size or block size that also is a // power of 2. dtile is Tile = 2^dtile. The Tile size will change // only if it is Tile > N. It produces 1 if N = 2^4 and T = 2^4 or if // N = 2^3 and T = 2^4; log2N will be greater than 0. // updates log2N and log2Tile. int blocksNeeded(int& log2N, int& log2Tile) { log2N -= log2Tile; if (log2N >= 0) { return 1 << log2N; } else {// set log2Tile to be the original log2N log2Tile += log2N; return 1; } } // int blocksNeeded(int N, int& Tile, int dtile){ // if ( Tile > N) { // Tile = N; // return 1; // } // return N >> dtile; // } // Totalblocks Needed calculates the total tiles needed for // all the scan arrays; log2N is log2(N) log2Tile is log2(Tile); int TotalBlocksNeeded(int log2N, int log2Tile) { int total{0}; log2N -= log2Tile; for (; log2N >= 0; log2N -= log2Tile) { total += 1 << log2N; } if (log2N > -log2Tile) { total += 1; } return total; } // this should also produce the total based on // the sum of a power series in 2^(log2Tile) int TotalBlocksNeeded2(int log2N, int log2Tile) { int quotient {log2N/log2Tile}; int PowerofTile = quotient * log2Tile; int rem = log2N - PowerofTile; int Total { ((1 << PowerofTile) - 1) / ((1 << log2Tile) -1)}; if (rem != 0) { Total *= 1 << rem; Total += 1; } return Total; } // will call itself recursively to produce the Total Sum of idata // Tile is a multiple of 2 N is the size of dev_idata, // 2^(dtile) = Tile, scan_sum is a preallocated array of the correct blocksize void efficientScanShared(int log2N, int log2Tile, int * dev_idata, int* scan_sum, int printoffset) { int n = (1 << log2N); // updates log2N for next iteration and log2Tile in case // this sum is less than the max printDevArray(n, printoffset, dev_idata, true); int numblocks {blocksNeeded(log2N, log2Tile)}; int maxThreads{ 1 << (log2Tile - 1) }; //maxThreads = std::max(maxThreads, 32); int Tile{ 1 << log2Tile }; dim3 numThreads ( maxThreads); dim3 numBLOCKS(numblocks); int size { sizeof(int)}; kernScanShared<<< numBLOCKS, numThreads, Tile * size >>>( Tile, dev_idata, scan_sum); checkCUDAError("find Cuda Error"); printDevArray(n, printoffset, dev_idata, true); // no need for scan_sum and total scan is correct if numblocks == 1 if (numblocks == 1) { return; } efficientScanShared( log2N, log2Tile, scan_sum, scan_sum + numblocks, 0); numBLOCKS = dim3(numblocks - 1); kernAddSumToTile<<< numBLOCKS, numThreads, Tile * size >>>( Tile, dev_idata + Tile, scan_sum + 1); checkCUDAError("find Cuda Error 2"); printDevArray(n, printoffset, dev_idata, true); //cudaThreadSynchronize(); } // init device arrays necessary to sum N items in shared Memory SharedScan initSharedScan(const int n, int tileSize) { // d is the number of scans 
needed and also the // upper bound for log2 of the number of elements SharedScan shared; shared.log2N = ilog2ceil(n); // int N{ 1 << shared.log2N }; // Tile should be less than or equal to N, TILEMAX, Tile // inputted tileSize = (tileSize == -1) ? std::min(MAXTILE, N): std::min({MAXTILE, N, tileSize}); tileSize = std::max(tileSize, 2); shared.log2T = ilog2(tileSize); // Tile must be a multiple of 2 to divide N so Tile // will actually be less than this // calculate Total scan size int ScanSize { TotalBlocksNeeded(shared.log2N, shared.log2T)}; //if (ScanSize != TotalBlocksNeeded2(shared.log2N, shared.log2T)){ // throw std::runtime_error("blocks needed may not be correct"); //} can check if TotalBlocksNeeded is correct. It passed each time initScanSum(ScanSize, &shared.scan_sum); initScanSum(N, &shared.dev_idata); return shared; } CompactSupport initCompactSupport(const SharedScan scan){ CompactSupport compact { scan, NULL}; int N {1 << compact.scan.log2N}; initScanSum(N, &compact.bool_data); return compact; } // does the scan but now puts void scanShared (int n, int *odata, const int *idata, int Tile) { int * dev_idata; int * scan_sum; // d is the number of scans needed and also the // upper bound for log2 of the number of elements int d{ ilog2ceil(n) }; // int N{ 1 << d }; // Tile should be less than or equal to N, TILEMAX, Tile // inputted Tile = (Tile == -1) ? std::min(MAXTILE, N): std::min({MAXTILE, N, Tile}); Tile = std::max(Tile, 2); int dtile { ilog2(Tile)}; // Tile must be a multiple of 2 to divide N so Tile // will actually be less than this // calculate Total scan size int ScanSize { TotalBlocksNeeded(d, dtile)}; //if (ScanSize != TotalBlocksNeeded2(d, dtile)){ // throw std::runtime_error("blocks needed may not be correct"); //} initScan(N, n, idata, &dev_idata); timer().startGpuTimer(); initScanSum(ScanSize, &scan_sum); efficientScanShared(d, dtile, dev_idata, scan_sum); timer().endGpuTimer(); // only transfer tho first n elements of the // exclusive scan transferIntToHost(n, odata, dev_idata); endScan(dev_idata); endScan(scan_sum); } void scan(int n, int *odata, const int *idata) { int * dev_idata; // d is the number of scans needed and also the // upper bound for log2 of the number of elements int d{ ilog2ceil(n) }; // int N{ 1 << d }; initScan(N, n, idata, &dev_idata); timer().startGpuTimer(); efficientScan(N, d, dev_idata); timer().endGpuTimer(); // only transfer tho first n elements of the // exclusive scan transferIntToHost(n, odata, dev_idata); endScan(dev_idata); } void initCompact(int N, int n, const int *idata, int ** dev_idata, int **dev_booldata, int ** dev_indices, int **dev_odata) { int size{ sizeof(int) }; cudaMalloc(reinterpret_cast<void**> (dev_booldata), N * size); cudaMalloc(reinterpret_cast<void**> (dev_idata), N * size); cudaMalloc(reinterpret_cast<void**> (dev_indices), N * size); cudaMalloc(reinterpret_cast<void**> (dev_odata), N * size); checkCUDAError("Allocating Compaction Scan Error"); cudaMemset(*dev_idata, 0, N * size); cudaMemcpy(*dev_idata, idata, n *size, cudaMemcpyHostToDevice); // no need to initialize the odata because the loop does that each time checkCUDAError("Initialize and Copy data to target Error"); cudaThreadSynchronize(); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int * dev_idata; int * dev_booldata; int * dev_indices; int * dev_odata; // d is the number of scans needed and also the // upper bound for log2 of the number of elements int d{ ilog2ceil(n) }; // int N{ 1 << d }; initCompact(N, n, idata, &dev_idata, &dev_booldata, &dev_indices, &dev_odata); timer().startGpuTimer(); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(N, dev_booldata, dev_idata); cudaMemcpy(dev_indices, dev_booldata, N * sizeof(int), cudaMemcpyDeviceToDevice); efficientScan(N, d, dev_indices); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(N, dev_odata, dev_idata, dev_booldata, dev_indices); timer().endGpuTimer(); int lastIndex; transferIntToHost(1, &lastIndex, dev_indices + N - 1); int lastIncluded; transferIntToHost(1, &lastIncluded, dev_booldata + N - 1); std::vector<int> input(n); std::vector<int> bools(n); std::vector<int> indices(n); transferIntToHost(n, input.data(), dev_idata); transferIntToHost(n, bools.data(), dev_booldata); transferIntToHost(n, indices.data(), dev_indices); printArray2(n, input.data(), true); printArray2(n, bools.data(), true); printArray2(n, indices.data(), true); n = lastIncluded + lastIndex; transferIntToHost(n, odata, dev_odata); printArray2(n, odata, true); endScan(dev_odata); endScan(dev_idata); endScan(dev_indices); endScan(dev_booldata); return n; } } }
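The kernScanShared, kernParallelReduction and kernDownSweep kernels in the pair above implement the Blelloch work-efficient exclusive scan: an up-sweep accumulates partial sums at power-of-two stride boundaries, the last element is cleared (with hipMemset/cudaMemset or by thread 0 of each tile), and a down-sweep pushes the partial sums back down. A minimal host-side reference of the same two phases, assuming a power-of-two length; the helper name is illustrative and not part of either file:

#include <vector>

std::vector<int> exclusiveScanReference(std::vector<int> x) {
    int n = static_cast<int>(x.size());            // assumed to be a power of two
    // Up-sweep (reduction): partial sums accumulate at stride boundaries.
    for (int stride = 2; stride <= n; stride <<= 1)
        for (int i = stride - 1; i < n; i += stride)
            x[i] += x[i - (stride >> 1)];
    x[n - 1] = 0;                                  // clear the total before the down-sweep
    // Down-sweep: propagate partial sums back down to form the exclusive scan.
    for (int stride = n; stride > 1; stride >>= 1)
        for (int i = stride - 1; i < n; i += stride) {
            int left = i - (stride >> 1);
            int tmp  = x[i];
            x[i]    += x[left];
            x[left]  = tmp;
        }
    return x;                                      // e.g. {1,2,3,4} -> {0,1,3,6}
}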
b25fb69a60dd1a05687abace5feabc7102f835d2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2017 NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <hip/hip_runtime.h> #include <stdio.h> //------------------------------------------------------------------------------ // Return ceil(x/y) for integers x and y inline int idivCeil( int x, int y ) { return (x + y-1)/y; } //------------------------------------------------------------------------------ __global__ void createRaysOrthoKernel(float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy ) { int rayx = threadIdx.x + blockIdx.x*blockDim.x; int rayy = threadIdx.y + blockIdx.y*blockDim.y; if( rayx >= width || rayy >= height ) return; int idx = rayx + rayy*width; rays[2*idx+0] = make_float4( x0+rayx*dx, y0+rayy*dy, z, 0 ); // origin, tmin rays[2*idx+1] = make_float4( 0, 0, 1, 1e34f ); // dir, tmax } //------------------------------------------------------------------------------ extern "C" void createRaysOrthoOnDevice( float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy ) { dim3 blockSize( 32, 32 ); dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( height, blockSize.y ) ); hipLaunchKernelGGL(( createRaysOrthoKernel), dim3(gridSize),dim3(blockSize), 0, 0, rays, width, height, x0, y0, z, dx, dy ); } //------------------------------------------------------------------------------ extern "C" void createRaysOrthoInterleavedOnDevice( float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy, int yOffset, int yStride ) { int lines = idivCeil( (height-yOffset), yStride ); dim3 blockSize( 32, 32 ); dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( lines, blockSize.y ) ); hipLaunchKernelGGL(( createRaysOrthoKernel), dim3(gridSize),dim3(blockSize), 0, 0, rays, width, lines, x0, y0+dy*yOffset, z, dx, dy*yStride ); } //------------------------------------------------------------------------------ __global__ void translateRaysKernel(float4* rays, int count, float3 offset) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx >= count ) 
return; float4 prev = rays[2*idx+0]; rays[2*idx+0] = make_float4( prev.x + offset.x, prev.y + offset.y, prev.z + offset.z, prev.w ); // origin, tmin } //------------------------------------------------------------------------------ extern "C" void translateRaysOnDevice(float4* rays, size_t count, float3 offset) { int blockSize = 1024; int blockCount = idivCeil((int)count, blockSize); hipLaunchKernelGGL(( translateRaysKernel), dim3(blockCount),dim3(blockSize), 0, 0, rays, (int)count, offset ); }
b25fb69a60dd1a05687abace5feabc7102f835d2.cu
/* * Copyright (c) 2017 NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cuda_runtime.h> #include <stdio.h> //------------------------------------------------------------------------------ // Return ceil(x/y) for integers x and y inline int idivCeil( int x, int y ) { return (x + y-1)/y; } //------------------------------------------------------------------------------ __global__ void createRaysOrthoKernel(float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy ) { int rayx = threadIdx.x + blockIdx.x*blockDim.x; int rayy = threadIdx.y + blockIdx.y*blockDim.y; if( rayx >= width || rayy >= height ) return; int idx = rayx + rayy*width; rays[2*idx+0] = make_float4( x0+rayx*dx, y0+rayy*dy, z, 0 ); // origin, tmin rays[2*idx+1] = make_float4( 0, 0, 1, 1e34f ); // dir, tmax } //------------------------------------------------------------------------------ extern "C" void createRaysOrthoOnDevice( float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy ) { dim3 blockSize( 32, 32 ); dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( height, blockSize.y ) ); createRaysOrthoKernel<<<gridSize,blockSize>>>( rays, width, height, x0, y0, z, dx, dy ); } //------------------------------------------------------------------------------ extern "C" void createRaysOrthoInterleavedOnDevice( float4* rays, int width, int height, float x0, float y0, float z, float dx, float dy, int yOffset, int yStride ) { int lines = idivCeil( (height-yOffset), yStride ); dim3 blockSize( 32, 32 ); dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( lines, blockSize.y ) ); createRaysOrthoKernel<<<gridSize,blockSize>>>( rays, width, lines, x0, y0+dy*yOffset, z, dx, dy*yStride ); } //------------------------------------------------------------------------------ __global__ void translateRaysKernel(float4* rays, int count, float3 offset) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx >= count ) return; float4 prev = rays[2*idx+0]; rays[2*idx+0] = make_float4( prev.x + offset.x, prev.y + offset.y, prev.z + offset.z, prev.w ); // 
origin, tmin } //------------------------------------------------------------------------------ extern "C" void translateRaysOnDevice(float4* rays, size_t count, float3 offset) { int blockSize = 1024; int blockCount = idivCeil((int)count, blockSize); translateRaysKernel<<<blockCount,blockSize>>>( rays, (int)count, offset ); }
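In the ray-generation pair above, each ray occupies two float4 slots: rays[2*idx+0] holds (origin.xyz, tmin) and rays[2*idx+1] holds (dir.xyz, tmax), so a width-by-height orthographic ray buffer needs width*height*2*sizeof(float4) bytes. A small host-side sketch of the sizing and of the image row covered by the interleaved variant; the helper names are illustrative only:

#include <cstddef>

// Bytes needed for the packed [origin|tmin, dir|tmax] layout used above.
size_t orthoRayBufferBytes(int width, int height) {
    return static_cast<size_t>(width) * height * 2 * 4 * sizeof(float);
}

// Image row written by local line `line` of the interleaved kernel; this matches
// the y0 + dy*yOffset origin and the dy*yStride step passed to createRaysOrthoKernel.
int interleavedRow(int yOffset, int yStride, int line) {
    return yOffset + line * yStride;
}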
a0dd0411f1685d16bdd0bb5518a4572e6fb68113.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define COMPLEX /******************************************************************************/ __global__ void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaDoubleComplex scale; __shared__ double xnorm; magmaDoubleComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; #ifdef REAL double alpha = *dx0; double alphai = MAGMA_Z_ZERO; if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 ) #else magmaDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 ) #endif { *dtau = MAGMA_Z_ZERO; *dA = *dx0; } else { #ifdef REAL // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = (beta - alpha) / beta; //*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha *dA = beta; } scale = 1. / (alpha - beta); #else // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_Z_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha *dA = MAGMA_Z_MAKE(beta, 0.); } alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); if (j < it) { *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_Z_MAKE(0., 0.); } } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). 
*******************************************************************************/ extern "C" void magma_zlarfgx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magma_queue_t queue ) { dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_zlarfgx_gpu_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , n, dx0, dx, dtau, dxnorm, dA, iter); } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgtx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr T, magma_int_t ldt, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* Generate the elementary reflector H(iter) */ magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue); if (iter == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue ); magma_zsetmatrix( 1, 1, &tt, 1, dx0, 1, queue ); } else { /* Compute the iter-th column of T */ hipLaunchKernelGGL(( magma_zgemv_kernel3) , dim3(iter), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , n, V, ldv, dx0, dwork, dtau ); hipLaunchKernelGGL(( magma_ztrmv_kernel2) , dim3(iter), dim3(iter), 0, queue->cuda_stream() , T, ldt, dwork, T+iter*ldt, dtau ); } }
a0dd0411f1685d16bdd0bb5518a4572e6fb68113.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define COMPLEX /******************************************************************************/ __global__ void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaDoubleComplex scale; __shared__ double xnorm; magmaDoubleComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; #ifdef REAL double alpha = *dx0; double alphai = MAGMA_Z_ZERO; if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 ) #else magmaDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 ) #endif { *dtau = MAGMA_Z_ZERO; *dA = *dx0; } else { #ifdef REAL // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = (beta - alpha) / beta; //*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha *dA = beta; } scale = 1. / (alpha - beta); #else // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_Z_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha *dA = MAGMA_Z_MAKE(beta, 0.); } alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); if (j < it) { *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_Z_MAKE(0., 0.); } } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magma_queue_t queue ) { dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) ); dim3 threads( BLOCK_SIZE ); magma_zlarfgx_gpu_kernel <<< blocks, threads, 0, queue->cuda_stream() >>> ( n, dx0, dx, dtau, dxnorm, dA, iter); } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with beta = ±norm( [dx0, dx] ) = ±dxnorm[0]. 
Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgtx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr T, magma_int_t ldt, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* Generate the elementary reflector H(iter) */ magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue); if (iter == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue ); magma_zsetmatrix( 1, 1, &tt, 1, dx0, 1, queue ); } else { /* Compute the iter-th column of T */ magma_zgemv_kernel3 <<< iter, BLOCK_SIZE, 0, queue->cuda_stream() >>> ( n, V, ldv, dx0, dwork, dtau ); magma_ztrmv_kernel2 <<< iter, iter, 0, queue->cuda_stream() >>> ( T, ldt, dwork, T+iter*ldt, dtau ); } }
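The MAGMA pair above computes, per column, the Householder quantities beta = -sign(Re(alpha)) * ||[dx0, dx]||, tau = (beta - alpha)/beta and scale = 1/(alpha - beta), with the norm precomputed on the GPU and passed in through dxnorm. A real-valued host sketch of the same arithmetic, for illustration only and not part of the MAGMA API:

#include <cmath>

struct Reflector { double beta, tau, scale; };

// alpha is the leading entry dx0; norm is ||[dx0, dx]||, i.e. what dxnorm holds.
Reflector larfgReal(double alpha, double norm) {
    Reflector r;
    r.beta  = -std::copysign(norm, alpha);   // beta takes the opposite sign of alpha
    r.tau   = (r.beta - alpha) / r.beta;     // H = I - tau * v * v^T, with v[0] = 1
    r.scale = 1.0 / (alpha - r.beta);        // trailing entries of v are dx * scale
    return r;
}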
17869af25ae183b9a9ace71a0659e1a8346c4d5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tfcc_cudacomparisoninterface.h" #include "exceptions/tfcc_cudaruntimeerror.h" #include "framework/tfcc_cudasession.h" #include "framework/tfcc_session.h" #include "framework/tfcc_types.h" namespace tfcc { // cuda functions template <class T> static void __global__ _cuda_equal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] == b ? 1 : 0; } template <class T> static void __global__ _cuda_unequal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] != b ? 1 : 0; } template <class T> static void __global__ _cuda_greater(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] > b ? 1 : 0; } template <class T> static void __global__ _cuda_greater_equal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] >= b ? 1 : 0; } template <class T> static void __global__ _cuda_less(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] < b ? 1 : 0; } template <class T> static void __global__ _cuda_less_equal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] <= b ? 
1 : 0; } // class function template <class T> CUDAComparisonInterface<T>::CUDAComparisonInterface(const CUDADeviceProperty& property) : _property(property) { } template <class T> CUDAComparisonInterface<T>::~CUDAComparisonInterface() { } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::equal(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); hipLaunchKernelGGL(( _cuda_equal), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), a.data(), a.size(), b, result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::unequal(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); hipLaunchKernelGGL(( _cuda_unequal), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), a.data(), a.size(), b, result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::greater(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); hipLaunchKernelGGL(( _cuda_greater), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), a.data(), a.size(), b, result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::greaterEqual(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); hipLaunchKernelGGL(( _cuda_greater_equal), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), a.data(), a.size(), b, result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::less(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); hipLaunchKernelGGL(( _cuda_less), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), a.data(), a.size(), b, result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::lessEqual(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); hipLaunchKernelGGL(( _cuda_less_equal), dim3(blockCount), dim3(threadCount), 
0, session->getImpl()->cudaStream(), a.data(), a.size(), b, result.data()); hipError_t ret = hipGetLastError(); if (ret != hipSuccess) throw CUDARuntimeError(ret); return result; } #define DEFINE_FUNC(type) template class CUDAComparisonInterface<type>; TFCC_FOR_ALL_TYPES(DEFINE_FUNC); } // namespace tfcc
17869af25ae183b9a9ace71a0659e1a8346c4d5e.cu
#include "tfcc_cudacomparisoninterface.h" #include "exceptions/tfcc_cudaruntimeerror.h" #include "framework/tfcc_cudasession.h" #include "framework/tfcc_session.h" #include "framework/tfcc_types.h" namespace tfcc { // cuda functions template <class T> static void __global__ _cuda_equal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] == b ? 1 : 0; } template <class T> static void __global__ _cuda_unequal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] != b ? 1 : 0; } template <class T> static void __global__ _cuda_greater(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] > b ? 1 : 0; } template <class T> static void __global__ _cuda_greater_equal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] >= b ? 1 : 0; } template <class T> static void __global__ _cuda_less(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] < b ? 1 : 0; } template <class T> static void __global__ _cuda_less_equal(const T* a, unsigned total, T b, uint8_t* result) { const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x; const unsigned skip = blockDim.x * gridDim.x; for (unsigned i = tid; i < total; i += skip) result[i] = a[i] <= b ? 
1 : 0; } // class function template <class T> CUDAComparisonInterface<T>::CUDAComparisonInterface(const CUDADeviceProperty& property) : _property(property) { } template <class T> CUDAComparisonInterface<T>::~CUDAComparisonInterface() { } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::equal(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); _cuda_equal<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( a.data(), a.size(), b, result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::unequal(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); _cuda_unequal<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( a.data(), a.size(), b, result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::greater(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); _cuda_greater<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( a.data(), a.size(), b, result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::greaterEqual(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); _cuda_greater_equal<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( a.data(), a.size(), b, result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::less(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); _cuda_less<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( a.data(), a.size(), b, result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw CUDARuntimeError(ret); return result; } template <class T> Variable<uint8_t> CUDAComparisonInterface<T>::lessEqual(const Tensor<T>& a, T b) { Variable<uint8_t> result(a.shape()); CUDASession* session = static_cast<CUDASession*>(Session::getThreadDefault()); size_t blockCount, threadCount; std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size()); _cuda_less_equal<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( a.data(), a.size(), b, result.data()); cudaError_t ret = cudaGetLastError(); if (ret != cudaSuccess) throw 
CUDARuntimeError(ret); return result; } #define DEFINE_FUNC(type) template class CUDAComparisonInterface<type>; TFCC_FOR_ALL_TYPES(DEFINE_FUNC); } // namespace tfcc
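Because every kernel in the tfcc pair above uses a grid-stride loop, any (blockCount, threadCount) pair works as long as the whole tensor is eventually covered; getSuitableKernelSize only has to pick a reasonable occupancy. A typical heuristic is sketched below; this is an assumption for illustration, not the actual CUDADeviceProperty implementation:

#include <algorithm>
#include <cstddef>
#include <utility>

// Hypothetical launch-size heuristic: enough blocks to cover `total` elements,
// capped so the grid-stride loop absorbs very large tensors.
std::pair<size_t, size_t> suitableKernelSizeSketch(size_t total, size_t smCount) {
    const size_t threads = 256;                          // multiple of the warp size
    size_t blocks = (total + threads - 1) / threads;
    blocks = std::min(blocks, smCount * 8);              // occupancy cap (assumed factor)
    return {std::max<size_t>(blocks, 1), threads};
}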
5fc8a17c6324dfada5e2933acdfa4c7c7e1ea73f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include "pixel.h"

__global__ void gray_scale_kernel(pixel *in, float *gray, unsigned col, unsigned row){
  unsigned c = threadIdx.x+(blockIdx.x*blockDim.x);
  unsigned r = threadIdx.y+(blockIdx.y*blockDim.y);
  if(c<col and r<row){
    unsigned id = (r*col)+c;
    gray[id] = (0.299*in[id].get_r())+
               (0.587*in[id].get_g())+
               (0.114*in[id].get_b());
  }
}

void gray_scale(pixel *in, float *h_out, unsigned col, unsigned row){
  unsigned msize = col*row*sizeof(pixel); // pixel vector
  unsigned rsize = col*row*sizeof(float); // float vector
  pixel *d_in;
  float *d_out;

  hipMalloc((void **)&d_in, msize);
  hipMalloc((void **)&d_out, rsize);
  hipMemcpy(d_in, in, msize, hipMemcpyHostToDevice);

  unsigned block = 16;
  dim3 dimGrid(ceil(col/block), ceil(row/block), 1);
  dim3 dimBlock(block, block, 1);
  hipLaunchKernelGGL(( gray_scale_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_in, d_out, col, row);

  hipMemcpy(h_out, d_out, rsize, hipMemcpyDeviceToHost);
  hipFree(d_in);
  hipFree(d_out);
}

int main(int argc, char const *argv[]){
  cv::Mat src_in = cv::imread("lena.png", cv::IMREAD_COLOR);
  if(src_in.empty()){
    std::cout << "Error : Image cannot be loaded..!!n";
    return 0;
  }
  cv::namedWindow("window_in", CV_WINDOW_NORMAL);
  cv::imshow("window_in", src_in);

  unsigned cols = src_in.cols;
  unsigned rows = src_in.rows;
  std::cout << "cols: " << cols << "\n";
  std::cout << "rows: " << rows << "\n";

  pixel *in = new pixel[cols*rows];
  float *out = new float[cols*rows];
  unsigned tmp, i, j;
  for(i=0; i<rows; i++){
    for(j=0; j<cols; j++){
      in[(i*cols)+j].set_r((float)src_in.at<cv::Vec3b>(i, j)[0]);
      in[(i*cols)+j].set_g((float)src_in.at<cv::Vec3b>(i, j)[1]);
      in[(i*cols)+j].set_b((float)src_in.at<cv::Vec3b>(i, j)[2]);
    }
  }

  gray_scale(in, out, cols, rows);

  cv::Mat src_out(rows,cols, CV_8UC3, cv::Scalar(0,0,0));
  for(i=0; i<rows; i++){
    for(j=0; j<cols; j++){
      tmp = (i*cols)+j;
      src_out.at<cv::Vec3b>(i, j)[0] = out[tmp];
      src_out.at<cv::Vec3b>(i, j)[1] = out[tmp];
      src_out.at<cv::Vec3b>(i, j)[2] = out[tmp];
    }
  }

  cv::namedWindow("window_out", CV_WINDOW_NORMAL);
  cv::imshow("window_out", src_out);
  cv::waitKey(0);
  cv::destroyWindow("window_in");
  cv::destroyWindow("window_out");

  delete in;
  delete out;
  return 0;
}
5fc8a17c6324dfada5e2933acdfa4c7c7e1ea73f.cu
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include "pixel.h"

__global__ void gray_scale_kernel(pixel *in, float *gray, unsigned col, unsigned row){
  unsigned c = threadIdx.x+(blockIdx.x*blockDim.x);
  unsigned r = threadIdx.y+(blockIdx.y*blockDim.y);
  if(c<col and r<row){
    unsigned id = (r*col)+c;
    gray[id] = (0.299*in[id].get_r())+
               (0.587*in[id].get_g())+
               (0.114*in[id].get_b());
  }
}

void gray_scale(pixel *in, float *h_out, unsigned col, unsigned row){
  unsigned msize = col*row*sizeof(pixel); // pixel vector
  unsigned rsize = col*row*sizeof(float); // float vector
  pixel *d_in;
  float *d_out;

  cudaMalloc((void **)&d_in, msize);
  cudaMalloc((void **)&d_out, rsize);
  cudaMemcpy(d_in, in, msize, cudaMemcpyHostToDevice);

  unsigned block = 16;
  dim3 dimGrid(ceil(col/block), ceil(row/block), 1);
  dim3 dimBlock(block, block, 1);
  gray_scale_kernel<<<dimGrid, dimBlock>>>(d_in, d_out, col, row);

  cudaMemcpy(h_out, d_out, rsize, cudaMemcpyDeviceToHost);
  cudaFree(d_in);
  cudaFree(d_out);
}

int main(int argc, char const *argv[]){
  cv::Mat src_in = cv::imread("lena.png", cv::IMREAD_COLOR);
  if(src_in.empty()){
    std::cout << "Error : Image cannot be loaded..!!n";
    return 0;
  }
  cv::namedWindow("window_in", CV_WINDOW_NORMAL);
  cv::imshow("window_in", src_in);

  unsigned cols = src_in.cols;
  unsigned rows = src_in.rows;
  std::cout << "cols: " << cols << "\n";
  std::cout << "rows: " << rows << "\n";

  pixel *in = new pixel[cols*rows];
  float *out = new float[cols*rows];
  unsigned tmp, i, j;
  for(i=0; i<rows; i++){
    for(j=0; j<cols; j++){
      in[(i*cols)+j].set_r((float)src_in.at<cv::Vec3b>(i, j)[0]);
      in[(i*cols)+j].set_g((float)src_in.at<cv::Vec3b>(i, j)[1]);
      in[(i*cols)+j].set_b((float)src_in.at<cv::Vec3b>(i, j)[2]);
    }
  }

  gray_scale(in, out, cols, rows);

  cv::Mat src_out(rows,cols, CV_8UC3, cv::Scalar(0,0,0));
  for(i=0; i<rows; i++){
    for(j=0; j<cols; j++){
      tmp = (i*cols)+j;
      src_out.at<cv::Vec3b>(i, j)[0] = out[tmp];
      src_out.at<cv::Vec3b>(i, j)[1] = out[tmp];
      src_out.at<cv::Vec3b>(i, j)[2] = out[tmp];
    }
  }

  cv::namedWindow("window_out", CV_WINDOW_NORMAL);
  cv::imshow("window_out", src_out);
  cv::waitKey(0);
  cv::destroyWindow("window_in");
  cv::destroyWindow("window_out");

  delete in;
  delete out;
  return 0;
}
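Two details of the grayscale pair above are worth spelling out. cv::imread with IMREAD_COLOR returns BGR data, so Vec3b channel 0 is blue rather than red, and ceil(col/block) divides two unsigned integers before ceil is applied, so edge tiles are dropped whenever col or row is not a multiple of 16; the arrays created with new[] should likewise be released with delete[]. A sketch of the integer round-up grid computation, as a suggested form rather than the original code:

#include <cuda_runtime.h>

// Integer round-up grid, so edge pixels are covered even when col or row
// is not a multiple of the block size.
dim3 gridFor(unsigned col, unsigned row, unsigned block) {
    return dim3((col + block - 1) / block, (row + block - 1) / block, 1);
}

// OpenCV stores IMREAD_COLOR images as BGR: Vec3b[0] = blue, [1] = green, [2] = red,
// so the 0.299 / 0.587 / 0.114 luminance weights pair with channels 2 / 1 / 0.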
e28a7b4f631d3531b7cbd494a10dac3a7e5c1c1c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 %s --std=c++11 -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - -verify

// Note: This test won't work with -fsyntax-only, because some of these errors
// are emitted during codegen.

#include "Inputs/cuda.h"

extern "C" void host_fn() {}
// expected-note@-1 {{'host_fn' declared here}}
// expected-note@-2 {{'host_fn' declared here}}
// expected-note@-3 {{'host_fn' declared here}}
// expected-note@-4 {{'host_fn' declared here}}
// expected-note@-5 {{'host_fn' declared here}}
// expected-note@-6 {{'host_fn' declared here}}
// expected-note@-7 {{'host_fn' declared here}}

struct Dummy {};

struct S {
  S() {}
  // expected-note@-1 {{'S' declared here}}
  // expected-note@-2 {{'S' declared here}}
  ~S() { host_fn(); }
  // expected-note@-1 {{'~S' declared here}}
  int x;
};

struct T {
  __host__ __device__ void hd() { host_fn(); }
  // expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}

  // No error; this is (implicitly) inline and is never called, so isn't
  // codegen'ed.
  __host__ __device__ void hd2() { host_fn(); }

  __host__ __device__ void hd3();

  void h() {}
  // expected-note@-1 {{'h' declared here}}

  void operator+();
  // expected-note@-1 {{'operator+' declared here}}

  void operator-(const T&) {}
  // expected-note@-1 {{'operator-' declared here}}

  operator Dummy() { return Dummy(); }
  // expected-note@-1 {{'operator Dummy' declared here}}

  __host__ void operator delete(void*);
  __device__ void operator delete(void*, size_t);
};

struct U {
  __device__ void operator delete(void*, size_t) = delete;
  __host__ __device__ void operator delete(void*);
};

__host__ __device__ void T::hd3() {
  host_fn();
  // expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}

template <typename T> __host__ __device__ void hd2() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__global__ void kernel() { hd2<int>(); }

__host__ __device__ void hd() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}

template <typename T> __host__ __device__ void hd3() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__device__ void device_fn() { hd3<int>(); }

// No error because this is never instantiated.
template <typename T> __host__ __device__ void hd4() { host_fn(); }

__host__ __device__ void local_var() {
  S s;
  // expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}

__host__ __device__ void placement_new(char *ptr) {
  ::new(ptr) S();
  // expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}

__host__ __device__ void explicit_destructor(S *s) {
  s->~S();
  // expected-error@-1 {{reference to __host__ function '~S' in __host__ __device__ function}}
}

__host__ __device__ void class_specific_delete(T *t, U *u) {
  delete t; // ok, call sized device delete even though host has preferable non-sized version
  delete u; // ok, call non-sized HD delete rather than sized D delete
}

__host__ __device__ void hd_member_fn() {
  T t;
  // Necessary to trigger an error on T::hd. It's (implicitly) inline, so
  // isn't codegen'ed until we call it.
  t.hd();
}

__host__ __device__ void h_member_fn() {
  T t;
  t.h();
  // expected-error@-1 {{reference to __host__ function 'h' in __host__ __device__ function}}
}

__host__ __device__ void fn_ptr() {
  auto* ptr = &host_fn;
  // expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}

template <typename T>
__host__ __device__ void fn_ptr_template() {
  auto* ptr = &host_fn;  // Not an error because the template isn't instantiated.
}

__host__ __device__ void unaryOp() {
  T t;
  (void) +t; // expected-error {{reference to __host__ function 'operator+' in __host__ __device__ function}}
}

__host__ __device__ void binaryOp() {
  T t;
  (void) (t - t); // expected-error {{reference to __host__ function 'operator-' in __host__ __device__ function}}
}

__host__ __device__ void implicitConversion() {
  T t;
  Dummy d = t; // expected-error {{reference to __host__ function 'operator Dummy' in __host__ __device__ function}}
}

template <typename T> struct TmplStruct {
  template <typename U> __host__ __device__ void fn() {}
};

template <>
template <>
__host__ __device__ void TmplStruct<int>::fn<int>() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}

__device__ void double_specialization() { TmplStruct<int>().fn<int>(); }
e28a7b4f631d3531b7cbd494a10dac3a7e5c1c1c.cu
// RUN: %clang_cc1 %s --std=c++11 -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - -verify

// Note: This test won't work with -fsyntax-only, because some of these errors
// are emitted during codegen.

#include "Inputs/cuda.h"

extern "C" void host_fn() {}
// expected-note@-1 {{'host_fn' declared here}}
// expected-note@-2 {{'host_fn' declared here}}
// expected-note@-3 {{'host_fn' declared here}}
// expected-note@-4 {{'host_fn' declared here}}
// expected-note@-5 {{'host_fn' declared here}}
// expected-note@-6 {{'host_fn' declared here}}
// expected-note@-7 {{'host_fn' declared here}}

struct Dummy {};

struct S {
  S() {}
  // expected-note@-1 {{'S' declared here}}
  // expected-note@-2 {{'S' declared here}}
  ~S() { host_fn(); }
  // expected-note@-1 {{'~S' declared here}}
  int x;
};

struct T {
  __host__ __device__ void hd() { host_fn(); }
  // expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}

  // No error; this is (implicitly) inline and is never called, so isn't
  // codegen'ed.
  __host__ __device__ void hd2() { host_fn(); }

  __host__ __device__ void hd3();

  void h() {}
  // expected-note@-1 {{'h' declared here}}

  void operator+();
  // expected-note@-1 {{'operator+' declared here}}

  void operator-(const T&) {}
  // expected-note@-1 {{'operator-' declared here}}

  operator Dummy() { return Dummy(); }
  // expected-note@-1 {{'operator Dummy' declared here}}

  __host__ void operator delete(void*);
  __device__ void operator delete(void*, size_t);
};

struct U {
  __device__ void operator delete(void*, size_t) = delete;
  __host__ __device__ void operator delete(void*);
};

__host__ __device__ void T::hd3() {
  host_fn();
  // expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}

template <typename T> __host__ __device__ void hd2() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__global__ void kernel() { hd2<int>(); }

__host__ __device__ void hd() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}

template <typename T> __host__ __device__ void hd3() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__device__ void device_fn() { hd3<int>(); }

// No error because this is never instantiated.
template <typename T> __host__ __device__ void hd4() { host_fn(); }

__host__ __device__ void local_var() {
  S s;
  // expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}

__host__ __device__ void placement_new(char *ptr) {
  ::new(ptr) S();
  // expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}

__host__ __device__ void explicit_destructor(S *s) {
  s->~S();
  // expected-error@-1 {{reference to __host__ function '~S' in __host__ __device__ function}}
}

__host__ __device__ void class_specific_delete(T *t, U *u) {
  delete t; // ok, call sized device delete even though host has preferable non-sized version
  delete u; // ok, call non-sized HD delete rather than sized D delete
}

__host__ __device__ void hd_member_fn() {
  T t;
  // Necessary to trigger an error on T::hd. It's (implicitly) inline, so
  // isn't codegen'ed until we call it.
  t.hd();
}

__host__ __device__ void h_member_fn() {
  T t;
  t.h();
  // expected-error@-1 {{reference to __host__ function 'h' in __host__ __device__ function}}
}

__host__ __device__ void fn_ptr() {
  auto* ptr = &host_fn;
  // expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}

template <typename T>
__host__ __device__ void fn_ptr_template() {
  auto* ptr = &host_fn;  // Not an error because the template isn't instantiated.
}

__host__ __device__ void unaryOp() {
  T t;
  (void) +t; // expected-error {{reference to __host__ function 'operator+' in __host__ __device__ function}}
}

__host__ __device__ void binaryOp() {
  T t;
  (void) (t - t); // expected-error {{reference to __host__ function 'operator-' in __host__ __device__ function}}
}

__host__ __device__ void implicitConversion() {
  T t;
  Dummy d = t; // expected-error {{reference to __host__ function 'operator Dummy' in __host__ __device__ function}}
}

template <typename T> struct TmplStruct {
  template <typename U> __host__ __device__ void fn() {}
};

template <>
template <>
__host__ __device__ void TmplStruct<int>::fn<int>() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}

__device__ void double_specialization() { TmplStruct<int>().fn<int>(); }
78bd98baa034a2f6bc6051be5295b02b89c66243.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VECTOR_REDUCTION_KERNEL_H_
#define _VECTOR_REDUCTION_KERNEL_H_

#define THREAD_BLOCK_SIZE 256 /* Size of a thread block. */
#define NUM_BLOCKS 240       /* Number of thread blocks. */

__global__ void vector_reduction_kernel(float *A, float *C, unsigned int num_elements)
{
    __shared__ float sum_per_thread[THREAD_BLOCK_SIZE]; // Allocate shared memory to hold the partial sums.

    unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the thread ID.
    unsigned int stride = blockDim.x * gridDim.x;
    double sum = 0.0f;
    unsigned int i = thread_id;

    /* Compute your partial sum. */
    while(i < num_elements){
        sum += (double)A[i];
        i += stride;
    }

    sum_per_thread[threadIdx.x] = (float)sum; // Copy sum to shared memory.
    __syncthreads(); // Wait for all threads in the thread block to finish up.

    /* Reduce the values generated by the thread block to a single value to be sent back to the CPU.
       The following code assumes that the number of threads per block is power of two. */
    i = blockDim.x/2;
    while(i != 0){
        if(threadIdx.x < i)
            sum_per_thread[threadIdx.x] += sum_per_thread[threadIdx.x + i];
        __syncthreads();
        i /= 2;
    }

    /* Write the partial sum computed by this thread block to global memory. */
    if(threadIdx.x == 0)
        C[blockIdx.x] = sum_per_thread[0];
}

/* This function uses a compare and swap technique to acquire a mutex/lock. */
__device__ void lock(int *mutex)
{
    while(atomicCAS(mutex, 0, 1) != 0);
}

/* This function uses an atomic exchange operation to release the mutex/lock. */
__device__ void unlock(int *mutex)
{
    atomicExch(mutex, 0);
}

__global__ void vector_reduction_kernel_using_atomics(float *A, float *result, unsigned int num_elements, int *mutex)
{
    __shared__ float sum_per_thread[THREAD_BLOCK_SIZE];

    unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the index of the thread.
    unsigned int stride = blockDim.x * gridDim.x;
    double sum = 0.0f;
    unsigned int i = thread_id;

    /* Generate the partial sum. */
    while(i < num_elements){
        sum += (double)A[i];
        i += stride;
    }

    sum_per_thread[threadIdx.x] = (float)sum; // Copy sum to shared memory.
    __syncthreads(); // Wait for all thread in the thread block to finish.

    /* Reduce the values generated by the thread block to a single value.
       We assume that the number of threads per block is power of two. */
    i = blockDim.x/2;
    while(i != 0){
        if(threadIdx.x < i)
            sum_per_thread[threadIdx.x] += sum_per_thread[threadIdx.x + i];
        __syncthreads();
        i /= 2;
    }

    /* Accumulate the sum computed by this thread block into the global shared variable. */
    if(threadIdx.x == 0){
        lock(mutex);
        *result += sum_per_thread[0];
        unlock(mutex);
    }
}

#endif // #ifndef _VECTOR_REDUCTION_KERNEL_H
78bd98baa034a2f6bc6051be5295b02b89c66243.cu
#ifndef _VECTOR_REDUCTION_KERNEL_H_
#define _VECTOR_REDUCTION_KERNEL_H_

#define THREAD_BLOCK_SIZE 256 /* Size of a thread block. */
#define NUM_BLOCKS 240       /* Number of thread blocks. */

__global__ void vector_reduction_kernel(float *A, float *C, unsigned int num_elements)
{
    __shared__ float sum_per_thread[THREAD_BLOCK_SIZE]; // Allocate shared memory to hold the partial sums.

    unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the thread ID.
    unsigned int stride = blockDim.x * gridDim.x;
    double sum = 0.0f;
    unsigned int i = thread_id;

    /* Compute your partial sum. */
    while(i < num_elements){
        sum += (double)A[i];
        i += stride;
    }

    sum_per_thread[threadIdx.x] = (float)sum; // Copy sum to shared memory.
    __syncthreads(); // Wait for all threads in the thread block to finish up.

    /* Reduce the values generated by the thread block to a single value to be sent back to the CPU.
       The following code assumes that the number of threads per block is power of two. */
    i = blockDim.x/2;
    while(i != 0){
        if(threadIdx.x < i)
            sum_per_thread[threadIdx.x] += sum_per_thread[threadIdx.x + i];
        __syncthreads();
        i /= 2;
    }

    /* Write the partial sum computed by this thread block to global memory. */
    if(threadIdx.x == 0)
        C[blockIdx.x] = sum_per_thread[0];
}

/* This function uses a compare and swap technique to acquire a mutex/lock. */
__device__ void lock(int *mutex)
{
    while(atomicCAS(mutex, 0, 1) != 0);
}

/* This function uses an atomic exchange operation to release the mutex/lock. */
__device__ void unlock(int *mutex)
{
    atomicExch(mutex, 0);
}

__global__ void vector_reduction_kernel_using_atomics(float *A, float *result, unsigned int num_elements, int *mutex)
{
    __shared__ float sum_per_thread[THREAD_BLOCK_SIZE];

    unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; // Obtain the index of the thread.
    unsigned int stride = blockDim.x * gridDim.x;
    double sum = 0.0f;
    unsigned int i = thread_id;

    /* Generate the partial sum. */
    while(i < num_elements){
        sum += (double)A[i];
        i += stride;
    }

    sum_per_thread[threadIdx.x] = (float)sum; // Copy sum to shared memory.
    __syncthreads(); // Wait for all thread in the thread block to finish.

    /* Reduce the values generated by the thread block to a single value.
       We assume that the number of threads per block is power of two. */
    i = blockDim.x/2;
    while(i != 0){
        if(threadIdx.x < i)
            sum_per_thread[threadIdx.x] += sum_per_thread[threadIdx.x + i];
        __syncthreads();
        i /= 2;
    }

    /* Accumulate the sum computed by this thread block into the global shared variable. */
    if(threadIdx.x == 0){
        lock(mutex);
        *result += sum_per_thread[0];
        unlock(mutex);
    }
}

#endif // #ifndef _VECTOR_REDUCTION_KERNEL_H
bea9614e1339b775dd46c7b769c03ab664fb776f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #include <string.h> typedef unsigned int uint; #define BLOCK_SIZE 1024 __global__ void scanGPU(uint* d_list, uint* flags, uint* AuxArray, uint* AuxScannedArray, int dim) { extern __shared__ uint I; if (threadIdx.x == 0) { I = atomicAdd(&AuxScannedArray[0], 1); } __syncthreads(); extern __shared__ uint scanBlockSum[2 * BLOCK_SIZE]; uint t = threadIdx.x; uint s = 2 * I * blockDim.x; if (s + t < dim) scanBlockSum[t] = d_list[s + t]; if (s + t + blockDim.x < dim) scanBlockSum[blockDim.x + t] = d_list[s + blockDim.x + t]; __syncthreads(); for (uint stride = 1; stride <= blockDim.x; stride *= 2) { int idx = (threadIdx.x + 1) * stride * 2 - 1; if (idx < 2 * blockDim.x) scanBlockSum[idx] += scanBlockSum[idx - stride]; __syncthreads(); } for (uint stride = blockDim.x / 2; stride > 0; stride /= 2) { __syncthreads(); int idx = (threadIdx.x + 1) * stride * 2 - 1; if (idx + stride < 2 * blockDim.x) { scanBlockSum[idx + stride] += scanBlockSum[idx]; } } __syncthreads(); if (threadIdx.x == 0) { if (I == 0) { AuxArray[I] = scanBlockSum[2 * blockDim.x - 1]; atomicAdd(&flags[I], 1); } else { while (atomicAdd(&flags[I - 1], 0) == 0) { ; } AuxArray[I] = AuxArray[I - 1] + scanBlockSum[2 * blockDim.x - 1]; __threadfence(); atomicAdd(&flags[I], 1); } } __syncthreads(); if (I > 0) { scanBlockSum[t] += AuxArray[I - 1]; scanBlockSum[t + blockDim.x] += AuxArray[I - 1]; } __syncthreads(); if (s + t < dim) d_list[s + t] = scanBlockSum[t]; if (s + t + blockDim.x < dim) d_list[s + blockDim.x + t] = scanBlockSum[blockDim.x + t]; } void scanCPU(uint* list, uint* sum, int dim) { uint res = 0; for (int i = 0; i < dim; ++i) { res += list[i]; sum[i] = res; } return; } int check_input(int dim) { if (dim < 0) { printf("Invalid list size \n"); printf("list size must be >= 0 \n"); return -1; } return 1; } int main(int argc, char** argv) { // Input and check if (argc != 3) { printf("Error input Parameter \n"); printf("Please input dim for input list \n"); printf("Example: ./execute_file -i dim \n"); return 0; } if (argc == 3 && (strcmp(argv[1], "-i") == 0)) { printf("Input Data\n"); } else { printf("Please Follow Format to Run Program: ./execute_file -i dim\n"); return -1; } const int Dim = atoi(argv[2]); if (check_input(Dim) == 1) { printf("Input is Valid \n"); } else { return -1; } printf("InputSize = %d, Block size = %d.\n\n", Dim, BLOCK_SIZE); //timer float gpu_time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //clock_t start, end; // Initialize list data uint* list = (uint*)malloc(Dim * sizeof(uint)); srand((unsigned)time(NULL)); for (uint i = 0; i < Dim; i++) { list[i] = rand(); } // Allocate memory on host for saving results uint* scan_CPU = (uint*)malloc(Dim * sizeof(uint)); uint* scan_GPU = (uint*)malloc(Dim * sizeof(uint)); // Allocate memory on device for variable uint* d_list, * d_flags, * d_AuxArray, * d_AuxScannedArray; hipMalloc((uint**)&d_list, Dim * sizeof(uint)); hipMalloc((uint**)&d_AuxScannedArray, sizeof(uint)); hipMalloc((uint**)&d_flags, (int)ceil(1.0 * Dim / BLOCK_SIZE) * sizeof(uint)); hipMalloc((uint**)&d_AuxArray, (int)ceil(1.0 * Dim / BLOCK_SIZE) * sizeof(uint)); hipMemset(d_flags, 0, (int)ceil(1.0 * Dim / BLOCK_SIZE) * sizeof(uint)); hipMemset(d_AuxScannedArray, 0, sizeof(uint)); hipMemcpy(d_list, list, Dim * sizeof(uint), hipMemcpyHostToDevice); hipEventRecord(start, 0); // Initialize and Invoke the kernel dim3 
dimBlock(BLOCK_SIZE); dim3 dimGrid((int)ceil(1.0 * Dim / dimBlock.x)); scanGPU << <dimGrid, dimBlock, (2 * BLOCK_SIZE + 1) * sizeof(uint) >> > (d_list, d_flags, d_AuxArray, d_AuxScannedArray, Dim); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&gpu_time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); // Copy the result to host hipMemcpy(scan_GPU, d_list, Dim * sizeof(uint), hipMemcpyDeviceToHost); //start = clock(); // Invoke the CPU scan algorithm scanCPU(list, scan_CPU, Dim); //end = clock(); //double cpu_time = (double)(end - start) / CLOCKS_PER_SEC * 1000; // Check the result int check = 1; for (int i = 0; i < Dim; i++) { if (scan_CPU[i] != scan_GPU[i]) { check = 0; } } if (check == 1) { printf("Results match.\n"); } else { printf("Wrong Result.\n"); } // Performance calculation printf("GPU executing time: %.4f ms, throughput = %.4f MElements/s\n", gpu_time, Dim / gpu_time / 1000); //printf("CPU executing time: %.4f ms, throughput = %.4f MElements/s\n", cpu_time, Dim / cpu_time / 1000); // Deallocate memory hipFree(d_list); hipFree(d_flags); hipFree(d_AuxArray); hipFree(d_AuxScannedArray); hipHostFree(scan_CPU); hipHostFree(scan_GPU); return 0; }
bea9614e1339b775dd46c7b769c03ab664fb776f.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #include <string.h> typedef unsigned int uint; #define BLOCK_SIZE 1024 __global__ void scanGPU(uint* d_list, uint* flags, uint* AuxArray, uint* AuxScannedArray, int dim) { extern __shared__ uint I; if (threadIdx.x == 0) { I = atomicAdd(&AuxScannedArray[0], 1); } __syncthreads(); extern __shared__ uint scanBlockSum[2 * BLOCK_SIZE]; uint t = threadIdx.x; uint s = 2 * I * blockDim.x; if (s + t < dim) scanBlockSum[t] = d_list[s + t]; if (s + t + blockDim.x < dim) scanBlockSum[blockDim.x + t] = d_list[s + blockDim.x + t]; __syncthreads(); for (uint stride = 1; stride <= blockDim.x; stride *= 2) { int idx = (threadIdx.x + 1) * stride * 2 - 1; if (idx < 2 * blockDim.x) scanBlockSum[idx] += scanBlockSum[idx - stride]; __syncthreads(); } for (uint stride = blockDim.x / 2; stride > 0; stride /= 2) { __syncthreads(); int idx = (threadIdx.x + 1) * stride * 2 - 1; if (idx + stride < 2 * blockDim.x) { scanBlockSum[idx + stride] += scanBlockSum[idx]; } } __syncthreads(); if (threadIdx.x == 0) { if (I == 0) { AuxArray[I] = scanBlockSum[2 * blockDim.x - 1]; atomicAdd(&flags[I], 1); } else { while (atomicAdd(&flags[I - 1], 0) == 0) { ; } AuxArray[I] = AuxArray[I - 1] + scanBlockSum[2 * blockDim.x - 1]; __threadfence(); atomicAdd(&flags[I], 1); } } __syncthreads(); if (I > 0) { scanBlockSum[t] += AuxArray[I - 1]; scanBlockSum[t + blockDim.x] += AuxArray[I - 1]; } __syncthreads(); if (s + t < dim) d_list[s + t] = scanBlockSum[t]; if (s + t + blockDim.x < dim) d_list[s + blockDim.x + t] = scanBlockSum[blockDim.x + t]; } void scanCPU(uint* list, uint* sum, int dim) { uint res = 0; for (int i = 0; i < dim; ++i) { res += list[i]; sum[i] = res; } return; } int check_input(int dim) { if (dim < 0) { printf("Invalid list size \n"); printf("list size must be >= 0 \n"); return -1; } return 1; } int main(int argc, char** argv) { // Input and check if (argc != 3) { printf("Error input Parameter \n"); printf("Please input dim for input list \n"); printf("Example: ./execute_file -i dim \n"); return 0; } if (argc == 3 && (strcmp(argv[1], "-i") == 0)) { printf("Input Data\n"); } else { printf("Please Follow Format to Run Program: ./execute_file -i dim\n"); return -1; } const int Dim = atoi(argv[2]); if (check_input(Dim) == 1) { printf("Input is Valid \n"); } else { return -1; } printf("InputSize = %d, Block size = %d.\n\n", Dim, BLOCK_SIZE); //timer float gpu_time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //clock_t start, end; // Initialize list data uint* list = (uint*)malloc(Dim * sizeof(uint)); srand((unsigned)time(NULL)); for (uint i = 0; i < Dim; i++) { list[i] = rand(); } // Allocate memory on host for saving results uint* scan_CPU = (uint*)malloc(Dim * sizeof(uint)); uint* scan_GPU = (uint*)malloc(Dim * sizeof(uint)); // Allocate memory on device for variable uint* d_list, * d_flags, * d_AuxArray, * d_AuxScannedArray; cudaMalloc((uint**)&d_list, Dim * sizeof(uint)); cudaMalloc((uint**)&d_AuxScannedArray, sizeof(uint)); cudaMalloc((uint**)&d_flags, (int)ceil(1.0 * Dim / BLOCK_SIZE) * sizeof(uint)); cudaMalloc((uint**)&d_AuxArray, (int)ceil(1.0 * Dim / BLOCK_SIZE) * sizeof(uint)); cudaMemset(d_flags, 0, (int)ceil(1.0 * Dim / BLOCK_SIZE) * sizeof(uint)); cudaMemset(d_AuxScannedArray, 0, sizeof(uint)); cudaMemcpy(d_list, list, Dim * sizeof(uint), cudaMemcpyHostToDevice); cudaEventRecord(start, 0); // Initialize and Invoke the kernel dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid((int)ceil(1.0 * Dim / dimBlock.x)); 
scanGPU << <dimGrid, dimBlock, (2 * BLOCK_SIZE + 1) * sizeof(uint) >> > (d_list, d_flags, d_AuxArray, d_AuxScannedArray, Dim); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); // Copy the result to host cudaMemcpy(scan_GPU, d_list, Dim * sizeof(uint), cudaMemcpyDeviceToHost); //start = clock(); // Invoke the CPU scan algorithm scanCPU(list, scan_CPU, Dim); //end = clock(); //double cpu_time = (double)(end - start) / CLOCKS_PER_SEC * 1000; // Check the result int check = 1; for (int i = 0; i < Dim; i++) { if (scan_CPU[i] != scan_GPU[i]) { check = 0; } } if (check == 1) { printf("Results match.\n"); } else { printf("Wrong Result.\n"); } // Performance calculation printf("GPU executing time: %.4f ms, throughput = %.4f MElements/s\n", gpu_time, Dim / gpu_time / 1000); //printf("CPU executing time: %.4f ms, throughput = %.4f MElements/s\n", cpu_time, Dim / cpu_time / 1000); // Deallocate memory cudaFree(d_list); cudaFree(d_flags); cudaFree(d_AuxArray); cudaFree(d_AuxScannedArray); cudaFreeHost(scan_CPU); cudaFreeHost(scan_GPU); return 0; }
40d2c0f5e91236b77b353d6b072d9a516b882318.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #define GRID_SIZE 100 #define BLOCK_SIZE 100 #define MAX_INPUT_COUNT 10 #define MAX_OUTPUT_COUNT 10 #define MAX_WORD_COUNT 20 #define NUM_KEYS 100 typedef struct tagWord { char szWord[30]; }Word; typedef struct tagKeyPair { char szWord[30]; int nCount; }KeyPair; typedef struct tagInputData { Word pWord[20]; int nWordCount; }InputData; typedef KeyPair OutputData; typedef struct tagKeyValueData { KeyPair pKeyPair[NUM_KEYS]; int nCount; }KeyValueData; int g_nKeyData = 0; __device__ void mapper(InputData *input, KeyValueData *keyData) { int nWordCount = input->nWordCount; keyData->nCount = nWordCount; for (int i = 0; i<nWordCount; i++) { KeyPair* keyPair = &keyData->pKeyPair[i]; int j = 0; char* p = input->pWord[i].szWord; while(*p != 0) { keyPair->szWord[j] = *p; p++; j++; } keyPair->nCount = 1; } } __device__ bool compare(char* sz1, char* sz2) { char* p2 = sz2; int n = 0; while (*p2 != 0) { if (sz1[n] != *p2) return false; p2++; n++; } return true; } __device__ void reducer(KeyValueData *keyData, OutputData *output, int nInputCount, int* nOutputCount) { int nIndex = 0; for (int index = 0; index < nInputCount; index++) { KeyValueData* pKeyData = &keyData[index]; int nCount = pKeyData->nCount; for (int i = 0; i<nCount; i++) { bool bExist = false; int j = 0; KeyPair keyPair = pKeyData->pKeyPair[i]; for (int j = 0; j <= nIndex; j++) { if (compare(output[j].szWord, keyPair.szWord)) { output[j].nCount++; bExist = true; break; } } if (bExist) continue; j = 0; char* p = keyPair.szWord; //printf("Word2:%s\n", keyPair.szWord); while (*p != 0) { output[nIndex].szWord[j] = *p; p++; j++; } output[nIndex].nCount = 1; nIndex++; } } *nOutputCount = nIndex; } __global__ void mapKernel(InputData *input, int nInputCount, KeyValueData *pairs) { int indexWithinTheGrid = threadIdx.x + blockIdx.x * blockDim.x; int gridStride = gridDim.x * blockDim.x; for (int i = indexWithinTheGrid; i < nInputCount; i += gridStride) { mapper(&input[i], &pairs[i]); } } __global__ void reduceKernel(KeyValueData *pairs, int nInputCount, OutputData *output, int* nOutputCount) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < 1; i += blockDim.x * gridDim.x) { reducer(pairs, output, nInputCount, nOutputCount); } } void cudaMap(InputData *input, int nInputCount, KeyValueData *pairs) { mapKernel << <GRID_SIZE, BLOCK_SIZE >> >(input, nInputCount, pairs); } void cudaReduce(KeyValueData *pairs, int nInputCount, OutputData *output, int* nOutputCount) { reduceKernel << <GRID_SIZE, BLOCK_SIZE >> >(pairs, nInputCount, output, nOutputCount); } void runMapReduce(InputData *input, int nInputCount, OutputData *output, int* nOutputCount) { InputData *dev_input; OutputData *dev_output; KeyValueData *dev_pairs; int *dev_count; size_t input_size = nInputCount * sizeof(InputData); size_t output_size = MAX_OUTPUT_COUNT * sizeof(OutputData); size_t pairs_size = nInputCount * sizeof(KeyValueData); hipMalloc(&dev_input, input_size); hipMalloc(&dev_pairs, pairs_size); hipMalloc(&dev_count, sizeof(int)); hipMemcpy(dev_input, input, input_size, hipMemcpyHostToDevice); cudaMap(dev_input, nInputCount, dev_pairs); hipFree(dev_input); hipMalloc(&dev_output, output_size); hipMemset(dev_output, 0, output_size); cudaReduce(dev_pairs, nInputCount, dev_output, dev_count); hipMemcpy(output, dev_output, output_size, hipMemcpyDeviceToHost); 
hipMemcpy(nOutputCount, dev_count, sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_count); hipFree(dev_pairs); hipFree(dev_output); } int main(int argc, char const *argv[]) { printf("Input Data:\n"); FILE* pFile = fopen("test.txt", "rt"); char c; // Input Text and Splitting InputData* pInputData = (InputData*)malloc(MAX_INPUT_COUNT*sizeof(InputData)); OutputData* pOutputData = (OutputData*)malloc(MAX_OUTPUT_COUNT * sizeof(OutputData)); int nInputCount = 0; int nOutputCount = 0; if (pFile) { char szLine[100]; while(!feof(pFile)) { memset(szLine, 0, 100); fgets(szLine, 100, pFile); printf("%s", szLine); pInputData[nInputCount].nWordCount = 0; int nWordIndex = 0; int nIndex = 0; char szWord[20]; if(strlen(szLine)<2) break; for (int i = 0; i < strlen(szLine); i++) { if (szLine[i] == ' ' || szLine[i] == 0x0d || szLine[i] == 0x0A) { szWord[nIndex] = 0; nIndex = 0; strcpy(pInputData[nInputCount].pWord[nWordIndex].szWord, szWord); pInputData[nInputCount].nWordCount++; nWordIndex++; if (szLine[i] == 0x0d) break; } else { szWord[nIndex] = szLine[i]; nIndex++; } } nInputCount++; } fclose(pFile); } // Splitting End int nTotalCount = 0; runMapReduce(pInputData, nInputCount, pOutputData, &nOutputCount); printf("--------------------------\n"); for (int i = 0; i < nOutputCount; i++) { printf("%s\t%d\n", pOutputData[i].szWord, pOutputData[i].nCount); nTotalCount += pOutputData[i].nCount; } // Output WordCount printf("--------------------------\n"); printf("Total Count:%d\n", nTotalCount); free(pOutputData); free(pInputData); return 0; }
40d2c0f5e91236b77b353d6b072d9a516b882318.cu
#include <cstdio> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <cuda_runtime.h> #define GRID_SIZE 100 #define BLOCK_SIZE 100 #define MAX_INPUT_COUNT 10 #define MAX_OUTPUT_COUNT 10 #define MAX_WORD_COUNT 20 #define NUM_KEYS 100 typedef struct tagWord { char szWord[30]; }Word; typedef struct tagKeyPair { char szWord[30]; int nCount; }KeyPair; typedef struct tagInputData { Word pWord[20]; int nWordCount; }InputData; typedef KeyPair OutputData; typedef struct tagKeyValueData { KeyPair pKeyPair[NUM_KEYS]; int nCount; }KeyValueData; int g_nKeyData = 0; __device__ void mapper(InputData *input, KeyValueData *keyData) { int nWordCount = input->nWordCount; keyData->nCount = nWordCount; for (int i = 0; i<nWordCount; i++) { KeyPair* keyPair = &keyData->pKeyPair[i]; int j = 0; char* p = input->pWord[i].szWord; while(*p != 0) { keyPair->szWord[j] = *p; p++; j++; } keyPair->nCount = 1; } } __device__ bool compare(char* sz1, char* sz2) { char* p2 = sz2; int n = 0; while (*p2 != 0) { if (sz1[n] != *p2) return false; p2++; n++; } return true; } __device__ void reducer(KeyValueData *keyData, OutputData *output, int nInputCount, int* nOutputCount) { int nIndex = 0; for (int index = 0; index < nInputCount; index++) { KeyValueData* pKeyData = &keyData[index]; int nCount = pKeyData->nCount; for (int i = 0; i<nCount; i++) { bool bExist = false; int j = 0; KeyPair keyPair = pKeyData->pKeyPair[i]; for (int j = 0; j <= nIndex; j++) { if (compare(output[j].szWord, keyPair.szWord)) { output[j].nCount++; bExist = true; break; } } if (bExist) continue; j = 0; char* p = keyPair.szWord; //printf("Word2:%s\n", keyPair.szWord); while (*p != 0) { output[nIndex].szWord[j] = *p; p++; j++; } output[nIndex].nCount = 1; nIndex++; } } *nOutputCount = nIndex; } __global__ void mapKernel(InputData *input, int nInputCount, KeyValueData *pairs) { int indexWithinTheGrid = threadIdx.x + blockIdx.x * blockDim.x; int gridStride = gridDim.x * blockDim.x; for (int i = indexWithinTheGrid; i < nInputCount; i += gridStride) { mapper(&input[i], &pairs[i]); } } __global__ void reduceKernel(KeyValueData *pairs, int nInputCount, OutputData *output, int* nOutputCount) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < 1; i += blockDim.x * gridDim.x) { reducer(pairs, output, nInputCount, nOutputCount); } } void cudaMap(InputData *input, int nInputCount, KeyValueData *pairs) { mapKernel << <GRID_SIZE, BLOCK_SIZE >> >(input, nInputCount, pairs); } void cudaReduce(KeyValueData *pairs, int nInputCount, OutputData *output, int* nOutputCount) { reduceKernel << <GRID_SIZE, BLOCK_SIZE >> >(pairs, nInputCount, output, nOutputCount); } void runMapReduce(InputData *input, int nInputCount, OutputData *output, int* nOutputCount) { InputData *dev_input; OutputData *dev_output; KeyValueData *dev_pairs; int *dev_count; size_t input_size = nInputCount * sizeof(InputData); size_t output_size = MAX_OUTPUT_COUNT * sizeof(OutputData); size_t pairs_size = nInputCount * sizeof(KeyValueData); cudaMalloc(&dev_input, input_size); cudaMalloc(&dev_pairs, pairs_size); cudaMalloc(&dev_count, sizeof(int)); cudaMemcpy(dev_input, input, input_size, cudaMemcpyHostToDevice); cudaMap(dev_input, nInputCount, dev_pairs); cudaFree(dev_input); cudaMalloc(&dev_output, output_size); cudaMemset(dev_output, 0, output_size); cudaReduce(dev_pairs, nInputCount, dev_output, dev_count); cudaMemcpy(output, dev_output, output_size, cudaMemcpyDeviceToHost); cudaMemcpy(nOutputCount, dev_count, sizeof(int), cudaMemcpyDeviceToHost); 
cudaFree(dev_count); cudaFree(dev_pairs); cudaFree(dev_output); } int main(int argc, char const *argv[]) { printf("Input Data:\n"); FILE* pFile = fopen("test.txt", "rt"); char c; // Input Text and Splitting InputData* pInputData = (InputData*)malloc(MAX_INPUT_COUNT*sizeof(InputData)); OutputData* pOutputData = (OutputData*)malloc(MAX_OUTPUT_COUNT * sizeof(OutputData)); int nInputCount = 0; int nOutputCount = 0; if (pFile) { char szLine[100]; while(!feof(pFile)) { memset(szLine, 0, 100); fgets(szLine, 100, pFile); printf("%s", szLine); pInputData[nInputCount].nWordCount = 0; int nWordIndex = 0; int nIndex = 0; char szWord[20]; if(strlen(szLine)<2) break; for (int i = 0; i < strlen(szLine); i++) { if (szLine[i] == ' ' || szLine[i] == 0x0d || szLine[i] == 0x0A) { szWord[nIndex] = 0; nIndex = 0; strcpy(pInputData[nInputCount].pWord[nWordIndex].szWord, szWord); pInputData[nInputCount].nWordCount++; nWordIndex++; if (szLine[i] == 0x0d) break; } else { szWord[nIndex] = szLine[i]; nIndex++; } } nInputCount++; } fclose(pFile); } // Splitting End int nTotalCount = 0; runMapReduce(pInputData, nInputCount, pOutputData, &nOutputCount); printf("--------------------------\n"); for (int i = 0; i < nOutputCount; i++) { printf("%s\t%d\n", pOutputData[i].szWord, pOutputData[i].nCount); nTotalCount += pOutputData[i].nCount; } // Output WordCount printf("--------------------------\n"); printf("Total Count:%d\n", nTotalCount); free(pOutputData); free(pInputData); return 0; }
1288ce021b0748c2978149c9fb900608b5cdbac6.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Point Cloud Library (PCL) - www.pointclouds.org
 * Copyright (c) 2020-, Open Perception, Inc.
 * Author: Shrijit Singh <[email protected]>
 *
 */

#include <mmul.cuh>

void
mmul_gpu(const executor::cuda_executor<>& ex, const MatrixXd& a, const MatrixXd& b, MatrixXd& c) {
  double *a_d, *b_d, *c_d;

  auto device_upload = [=](const void* var, std::size_t size) {
    void* gpu_var;
    hipMallocManaged(&gpu_var, size);
    memcpy(gpu_var, var, size);
    return gpu_var;
  };

  a_d = static_cast<double*>(device_upload(a.data(), a.size() * sizeof(double)));
  b_d = static_cast<double*>(device_upload(b.data(), b.size() * sizeof(double)));
  c_d = static_cast<double*>(device_upload(c.data(), c.size() * sizeof(double)));

  auto shape = executor::executor_shape_t<executor::cuda_executor<>>{
      {static_cast<unsigned int>(ceil(a.rows() / 2.0)),
       static_cast<unsigned int>(ceil(b.cols() / 2.0)),
       1},
      {2, 2, 1}};

  auto mul = [=] __device__() {
    unsigned row = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned col = blockIdx.x * blockDim.x + threadIdx.x;
    double sum = 0;
    if (col < b.cols() && row < a.rows()) {
      for (int i = 0; i < a.cols(); i++) {
        sum += a_d[row * a.cols() + i] * b_d[i * b.cols() + col];
      }
      c_d[row * b.cols() + col] = sum;
    }
  };

  ex.bulk_execute(mul, shape);

  memcpy(static_cast<double*>(c.data()), c_d, 9 * sizeof(double));
}
1288ce021b0748c2978149c9fb900608b5cdbac6.cu
/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Point Cloud Library (PCL) - www.pointclouds.org
 * Copyright (c) 2020-, Open Perception, Inc.
 * Author: Shrijit Singh <[email protected]>
 *
 */

#include <mmul.cuh>

void
mmul_gpu(const executor::cuda_executor<>& ex, const MatrixXd& a, const MatrixXd& b, MatrixXd& c) {
  double *a_d, *b_d, *c_d;

  auto device_upload = [=](const void* var, std::size_t size) {
    void* gpu_var;
    cudaMallocManaged(&gpu_var, size);
    memcpy(gpu_var, var, size);
    return gpu_var;
  };

  a_d = static_cast<double*>(device_upload(a.data(), a.size() * sizeof(double)));
  b_d = static_cast<double*>(device_upload(b.data(), b.size() * sizeof(double)));
  c_d = static_cast<double*>(device_upload(c.data(), c.size() * sizeof(double)));

  auto shape = executor::executor_shape_t<executor::cuda_executor<>>{
      {static_cast<unsigned int>(ceil(a.rows() / 2.0)),
       static_cast<unsigned int>(ceil(b.cols() / 2.0)),
       1},
      {2, 2, 1}};

  auto mul = [=] __device__() {
    unsigned row = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned col = blockIdx.x * blockDim.x + threadIdx.x;
    double sum = 0;
    if (col < b.cols() && row < a.rows()) {
      for (int i = 0; i < a.cols(); i++) {
        sum += a_d[row * a.cols() + i] * b_d[i * b.cols() + col];
      }
      c_d[row * b.cols() + col] = sum;
    }
  };

  ex.bulk_execute(mul, shape);

  memcpy(static_cast<double*>(c.data()), c_d, 9 * sizeof(double));
}
441f9f2c7507773daae2012fd72770b8730182c9.hip
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <THH/THHApply.cuh>

template <typename T>
struct ThresholdUpdateOutput
{
  const T threshold_;
  const T val_;

  ThresholdUpdateOutput(T threshold, T val)
    : threshold_(threshold)
    , val_(val)
  {}

  __device__ __forceinline__ void operator()(T *out, T *in)
  {
    T x = *in;
    *out = (x <= threshold_) ? val_ : x;  // this order propagates NaN
  }
};

// in-place variant
template <typename T>
struct ThresholdUpdateOutputIP
{
  const T threshold_;
  const T val_;

  ThresholdUpdateOutputIP(T threshold, T val)
    : threshold_(threshold)
    , val_(val)
  {}

  __device__ __forceinline__ void operator()(T *x)
  {
    *x = (*x <= threshold_) ? val_ : *x;  // this order propagates NaN
  }
};

template <typename T>
struct ThresholdUpdateGradInput
{
  const T threshold_;

  ThresholdUpdateGradInput(T threshold)
    : threshold_(threshold)
  {}

  __device__ __forceinline__ void operator()(
    T *gradInput, T *input, T *gradOutput) const
  {
    *gradInput = (*input <= threshold_) ? ScalarConvert<int, T>::to(0) : *gradOutput;  // this order propagates NaN
  }
};

template <typename T>
struct ThresholdUpdateGradInputIP
{
  const T threshold_;

  ThresholdUpdateGradInputIP(T threshold)
    : threshold_(threshold)
  {}

  __device__ __forceinline__ void operator()(
    T *gradOutput, T *input) const
  {
    *gradOutput = (*input <= threshold_) ? ScalarConvert<int, T>::to(0) : *gradOutput;  // this order propagates NaN
  }
};

#include "generic/Threshold.cu"
#include "THHGenerateFloatTypes.h"
441f9f2c7507773daae2012fd72770b8730182c9.cu
#include "THCUNN.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> template <typename T> struct ThresholdUpdateOutput { const T threshold_; const T val_; ThresholdUpdateOutput(T threshold, T val) : threshold_(threshold) , val_(val) {} __device__ __forceinline__ void operator()(T *out, T *in) { T x = *in; *out = (x <= threshold_) ? val_ : x; // this order propagates NaN } }; // in-place variant template <typename T> struct ThresholdUpdateOutputIP { const T threshold_; const T val_; ThresholdUpdateOutputIP(T threshold, T val) : threshold_(threshold) , val_(val) {} __device__ __forceinline__ void operator()(T *x) { *x = (*x <= threshold_) ? val_ : *x; // this order propagates NaN } }; template <typename T> struct ThresholdUpdateGradInput { const T threshold_; ThresholdUpdateGradInput(T threshold) : threshold_(threshold) {} __device__ __forceinline__ void operator()( T *gradInput, T *input, T *gradOutput) const { *gradInput = (*input <= threshold_) ? ScalarConvert<int, T>::to(0) : *gradOutput; // this order propagates NaN } }; template <typename T> struct ThresholdUpdateGradInputIP { const T threshold_; ThresholdUpdateGradInputIP(T threshold) : threshold_(threshold) {} __device__ __forceinline__ void operator()( T *gradOutput, T *input) const { *gradOutput = (*input <= threshold_) ? ScalarConvert<int, T>::to(0) : *gradOutput; // this order propagates NaN } }; #include "generic/Threshold.cu" #include "THCGenerateFloatTypes.h"
8f08a02fcad6a5e6e0b76483b3722b5bc3ffab4f.hip
// !!! This is a file automatically generated by hipify!!! #include "Possibilities.cuh" #include <iostream> #include "Timer.h" #include "LargeNumber.h" #include <numeric> #include "InfInt.h" Possibilities::Possibilities(LineDescription ld): desc(std::move(ld)), loaded(false) { std::cout << "poss ctor: "; for (int i = 0; i < getRules().size(); i++) std::cout << getRules().at(i); std::cout << "\t\t"; LoadAllPossibilities(); std::cout << getPossSize(); std::cout << std::endl; /*char syms[] = { ' ', 'X', '-' }; for (int i = 0; i < possSize; i++) { std::cout << i + 1 << ")\t"; for (int j = 0; j < ld.first; j++) { std::cout << syms[getLinePtr(possSize - i - 1)[j]]; } std::cout << std::endl; } std::cout << std::endl;*/ } std::string Possibilities::toString() const { std::string ret = getPossSize() + ") "; for (int i = 0; i < getRules().size(); i++) { ret += getRules().at(i) + " "; } return ret; } _DEV_HOST_ const Cell * Possibilities::getRawPossibilities() const { return possibilities; } _DEV_HOST_ Cell * Possibilities::getRawPossibilities() { return possibilities; } Possibilities::Possibilities(const Possibilities & other) : desc(other.desc) { std::cout << "possibilities copy constractor" << std::endl; } Possibilities::~Possibilities() { MemFreeSherd(possibilities); } _DEV_HOST_ Cell * Possibilities::getLinePtr(int lineIndex) { return &possibilities[lineIndex * getLineLen()]; } _DEV_HOST_ const Cell * Possibilities::getLinePtr(int lineIndex) const { return &possibilities[lineIndex * getLineLen()]; } const std::vector<int>& Possibilities::getRules() const { return desc.second; } _DEV_HOST_ int Possibilities::getLineLen() const { return desc.first; } _DEV_HOST_ int Possibilities::getPossSize() const { return possSize; } _DEV_HOST_ Cell * Possibilities::getLinePtr(Cell * raw, int lineIndex, int lineSize) { return &raw[lineIndex * lineSize]; } _DEV_HOST_ const Cell * Possibilities::getLinePtr(const Cell * raw, int lineIndex, int lineSize) { return &raw[lineIndex * lineSize]; } void Possibilities::LoadAllPossibilities() { Timer::start("possibi"); possSize = calcPossSize(); if (possSize > MAX_POSSIBILITIES_TO_CALCULATE) { loaded = false; Timer::abort("possibi"); } else { int numInserted = 0; MemAllocSherd(&possibilities, sizeof(Cell)*possSize*getLineLen()); tempMemory = new Cell[sizeof(Cell) * possSize * getLineLen()]; fillPossibilities(0, getRules().begin(), numInserted, std::vector<Cell>(getLineLen(), Cell::WHITE)); hipMemcpy(possibilities, tempMemory, sizeof(Cell)*possSize*getLineLen(), hipMemcpyHostToDevice); loaded = true; delete tempMemory; Timer::stop("possibi"); } } int binomialCoeff(int n, int k) { // Base Cases if (k == 0 || k == n) return 1; // Recur return binomialCoeff(n - 1, k - 1) + binomialCoeff(n - 1, k); } int Possibilities::calcPossSize() { int res; const int ruleSum = std::accumulate(getRules().begin(), getRules().end(), 0); const int W = getLineLen() - ruleSum - getRules().size() + 1; /*{ Timer::start("LargeNumber"); LargeNumber n1 = LargeNumber::Factorial(getRules().size() + W); LargeNumber n2 = LargeNumber::Factorial(getRules().size()); LargeNumber n3 = LargeNumber::Factorial(W); n2.Multiply(n3); res1 = n1.Divide(n2); Timer::stop("LargeNumber"); }*/ Timer::start("pos size"); res = binomialCoeff(getRules().size() + W, W); Timer::stop("pos size"); //assert(res1 == res2); return res; } bool Possibilities::isLoaded() const { return loaded; } void Possibilities::fillPossibilities( int startIndex, std::vector<int>::const_iterator currRule, int & insertLineIndex, std::vector<Cell> 
currLine) { if (currRule == getRules().end()) { memcpy(tempMemory + insertLineIndex * getLineLen(), currLine.data(), sizeof(Cell)*getLineLen()); insertLineIndex++; return; } const int dist = std::distance(currRule, getRules().end()) - 1; const int sum = std::accumulate(currRule, getRules().end(), 0); if (startIndex + dist + sum > getLineLen()) return; if (startIndex + dist + sum + 1 <= getLineLen()) fillPossibilities(startIndex + 1, currRule, insertLineIndex, currLine); paint(currLine, startIndex, *currRule); fillPossibilities(startIndex + *currRule + 1, currRule + 1, insertLineIndex, currLine); } void Possibilities::paint(std::vector<Cell>& line, int startIndex, int rule) { for (int i = startIndex; i < startIndex + rule; i++) { line.at(i) = Cell::BLACK; } }
8f08a02fcad6a5e6e0b76483b3722b5bc3ffab4f.cu
#include "Possibilities.cuh" #include <iostream> #include "Timer.h" #include "LargeNumber.h" #include <numeric> #include "InfInt.h" Possibilities::Possibilities(LineDescription ld): desc(std::move(ld)), loaded(false) { std::cout << "poss ctor: "; for (int i = 0; i < getRules().size(); i++) std::cout << getRules().at(i); std::cout << "\t\t"; LoadAllPossibilities(); std::cout << getPossSize(); std::cout << std::endl; /*char syms[] = { ' ', 'X', '-' }; for (int i = 0; i < possSize; i++) { std::cout << i + 1 << ")\t"; for (int j = 0; j < ld.first; j++) { std::cout << syms[getLinePtr(possSize - i - 1)[j]]; } std::cout << std::endl; } std::cout << std::endl;*/ } std::string Possibilities::toString() const { std::string ret = getPossSize() + ") "; for (int i = 0; i < getRules().size(); i++) { ret += getRules().at(i) + " "; } return ret; } _DEV_HOST_ const Cell * Possibilities::getRawPossibilities() const { return possibilities; } _DEV_HOST_ Cell * Possibilities::getRawPossibilities() { return possibilities; } Possibilities::Possibilities(const Possibilities & other) : desc(other.desc) { std::cout << "possibilities copy constractor" << std::endl; } Possibilities::~Possibilities() { MemFreeSherd(possibilities); } _DEV_HOST_ Cell * Possibilities::getLinePtr(int lineIndex) { return &possibilities[lineIndex * getLineLen()]; } _DEV_HOST_ const Cell * Possibilities::getLinePtr(int lineIndex) const { return &possibilities[lineIndex * getLineLen()]; } const std::vector<int>& Possibilities::getRules() const { return desc.second; } _DEV_HOST_ int Possibilities::getLineLen() const { return desc.first; } _DEV_HOST_ int Possibilities::getPossSize() const { return possSize; } _DEV_HOST_ Cell * Possibilities::getLinePtr(Cell * raw, int lineIndex, int lineSize) { return &raw[lineIndex * lineSize]; } _DEV_HOST_ const Cell * Possibilities::getLinePtr(const Cell * raw, int lineIndex, int lineSize) { return &raw[lineIndex * lineSize]; } void Possibilities::LoadAllPossibilities() { Timer::start("possibi"); possSize = calcPossSize(); if (possSize > MAX_POSSIBILITIES_TO_CALCULATE) { loaded = false; Timer::abort("possibi"); } else { int numInserted = 0; MemAllocSherd(&possibilities, sizeof(Cell)*possSize*getLineLen()); tempMemory = new Cell[sizeof(Cell) * possSize * getLineLen()]; fillPossibilities(0, getRules().begin(), numInserted, std::vector<Cell>(getLineLen(), Cell::WHITE)); cudaMemcpy(possibilities, tempMemory, sizeof(Cell)*possSize*getLineLen(), cudaMemcpyHostToDevice); loaded = true; delete tempMemory; Timer::stop("possibi"); } } int binomialCoeff(int n, int k) { // Base Cases if (k == 0 || k == n) return 1; // Recur return binomialCoeff(n - 1, k - 1) + binomialCoeff(n - 1, k); } int Possibilities::calcPossSize() { int res; const int ruleSum = std::accumulate(getRules().begin(), getRules().end(), 0); const int W = getLineLen() - ruleSum - getRules().size() + 1; /*{ Timer::start("LargeNumber"); LargeNumber n1 = LargeNumber::Factorial(getRules().size() + W); LargeNumber n2 = LargeNumber::Factorial(getRules().size()); LargeNumber n3 = LargeNumber::Factorial(W); n2.Multiply(n3); res1 = n1.Divide(n2); Timer::stop("LargeNumber"); }*/ Timer::start("pos size"); res = binomialCoeff(getRules().size() + W, W); Timer::stop("pos size"); //assert(res1 == res2); return res; } bool Possibilities::isLoaded() const { return loaded; } void Possibilities::fillPossibilities( int startIndex, std::vector<int>::const_iterator currRule, int & insertLineIndex, std::vector<Cell> currLine) { if (currRule == getRules().end()) { 
memcpy(tempMemory + insertLineIndex * getLineLen(), currLine.data(), sizeof(Cell)*getLineLen()); insertLineIndex++; return; } const int dist = std::distance(currRule, getRules().end()) - 1; const int sum = std::accumulate(currRule, getRules().end(), 0); if (startIndex + dist + sum > getLineLen()) return; if (startIndex + dist + sum + 1 <= getLineLen()) fillPossibilities(startIndex + 1, currRule, insertLineIndex, currLine); paint(currLine, startIndex, *currRule); fillPossibilities(startIndex + *currRule + 1, currRule + 1, insertLineIndex, currLine); } void Possibilities::paint(std::vector<Cell>& line, int startIndex, int rule) { for (int i = startIndex; i < startIndex + rule; i++) { line.at(i) = Cell::BLACK; } }
014c75408d53fbed85b47671d444c7a445aa0091.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date @precisions normal z -> s d c */ #include "magma_internal.h" #include "commonblas_z.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 #define COMPLEX /******************************************************************************/ __global__ void magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx, magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex *dA, int it) { const int i = threadIdx.x; const int j = i + BLOCK_SIZE * blockIdx.x; __shared__ magmaDoubleComplex scale; __shared__ double xnorm; magmaDoubleComplex dxi; if ( j < n-1 ) dxi = dx[j]; if ( i == 0 ) { xnorm = *dxnorm; #ifdef REAL double alpha = *dx0; double alphai = MAGMA_Z_ZERO; if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 ) #else magmaDoubleComplex alpha = *dx0; double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha); if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 ) #endif { *dtau = MAGMA_Z_ZERO; *dA = *dx0; } else { #ifdef REAL // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm ); beta = -copysign( beta, alpha ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = (beta - alpha) / beta; //*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha *dA = beta; } scale = 1. / (alpha - beta); #else // no need to compute the norm as it is passed as input double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm ); beta = -copysign( beta, alphar ); // todo: deal with badly scaled vectors (see lapack's larfg) if (j == 0) { *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta); //*dx0 = MAGMA_Z_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha *dA = MAGMA_Z_MAKE(beta, 0.); } alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha)); scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha); #endif } } // scale x __syncthreads(); if ( xnorm != 0 && j < n-1) dx[j] = MAGMA_Z_MUL(dxi, scale); if (j < it) { *( dA-it+j) = *(dx0-it+j); *(dx0-it+j) = MAGMA_Z_MAKE(0., 0.); } } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with |beta| = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). 
*******************************************************************************/ extern "C" void magma_zlarfgx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magma_queue_t queue ) { dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_zlarfgx_gpu_kernel) , dim3(blocks), dim3(threads), 0, queue->cuda_stream() , n, dx0, dx, dtau, dxnorm, dA, iter); } /***************************************************************************//** Generates Householder elementary reflector H = I - tau v v^T to reduce H [ dx0 ] = [ beta ] [ dx ] [ 0 ] with |beta| = norm( [dx0, dx] ) = dxnorm[0]. Stores v over dx; first element of v is 1 and is not stored. Stores beta over dx0. Stores tau. The difference with LAPACK's zlarfg is that the norm of dx, and hance beta, are computed outside the routine and passed to it in dxnorm (array on the GPU). *******************************************************************************/ extern "C" void magma_zlarfgtx_gpu( magma_int_t n, magmaDoubleComplex_ptr dx0, magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr dtau, magmaDouble_ptr dxnorm, magmaDoubleComplex_ptr dA, magma_int_t iter, magmaDoubleComplex_ptr V, magma_int_t ldv, magmaDoubleComplex_ptr T, magma_int_t ldt, magmaDoubleComplex_ptr dwork, magma_queue_t queue ) { /* Generate the elementary reflector H(iter) */ magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue); if (iter == 0) { magmaDoubleComplex tt = MAGMA_Z_ONE; magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue ); magma_zsetmatrix( 1, 1, &tt, 1, dx0, 1, queue ); } else { /* Compute the iter-th column of T */ hipLaunchKernelGGL(( magma_zgemv_kernel3) , dim3(iter), dim3(BLOCK_SIZE), 0, queue->cuda_stream() , n, V, ldv, dx0, dwork, dtau ); hipLaunchKernelGGL(( magma_ztrmv_kernel2) , dim3(iter), dim3(iter), 0, queue->cuda_stream() , T, ldt, dwork, T+iter*ldt, dtau ); } }
014c75408d53fbed85b47671d444c7a445aa0091.cu
/*
    -- MAGMA (version 2.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date

       @precisions normal z -> s d c

*/
#include "magma_internal.h"
#include "commonblas_z.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define COMPLEX


/******************************************************************************/
__global__ void
magma_zlarfgx_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
                          magmaDoubleComplex *dtau, double *dxnorm,
                          magmaDoubleComplex *dA, int it)
{
    const int i = threadIdx.x;
    const int j = i + BLOCK_SIZE * blockIdx.x;
    __shared__ magmaDoubleComplex scale;
    __shared__ double xnorm;

    magmaDoubleComplex dxi;

    if ( j < n-1 )
        dxi = dx[j];

    if ( i == 0 ) {
        xnorm = *dxnorm;
        #ifdef REAL
        double alpha = *dx0;
        double alphai = MAGMA_Z_ZERO;
        if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 1 )
        #else
        magmaDoubleComplex alpha = *dx0;
        double alphar = MAGMA_Z_REAL(alpha), alphai = MAGMA_Z_IMAG(alpha);
        if ( (xnorm == 0 && alphai == MAGMA_Z_ZERO ) || n == 0 )
        #endif
        {
            *dtau = MAGMA_Z_ZERO;
            *dA   = *dx0;
        }
        else {
            #ifdef REAL
            // no need to compute the norm as it is passed as input
            double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
            beta = -copysign( beta, alpha );

            // todo: deal with badly scaled vectors (see lapack's larfg)
            if (j == 0) {
                *dtau = (beta - alpha) / beta;
                //*dx0 = 1.; // cannot be done here because of a race condition: all thread blocks need to read it for alpha
                *dA   = beta;
            }

            scale = 1. / (alpha - beta);
            #else
            // no need to compute the norm as it is passed as input
            double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
            beta = -copysign( beta, alphar );

            // todo: deal with badly scaled vectors (see lapack's larfg)
            if (j == 0) {
                *dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
                //*dx0 = MAGMA_Z_MAKE( 1., 0.); // cannot be done here because of a race condition: all thread blocks need to read it for alpha
                *dA   = MAGMA_Z_MAKE(beta, 0.);
            }

            alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
            scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
            #endif
        }
    }

    // scale x
    __syncthreads();
    if ( xnorm != 0 && j < n-1)
        dx[j] = MAGMA_Z_MUL(dxi, scale);

    if (j < it) {
        *( dA-it+j) = *(dx0-it+j);
        *(dx0-it+j) = MAGMA_Z_MAKE(0., 0.);
    }
}


/***************************************************************************//**
    Generates Householder elementary reflector H = I - tau v v^T to reduce
        H [ dx0 ] = [ beta ]
          [ dx  ]   [ 0    ]
    with |beta| = norm( [dx0, dx] ) = dxnorm[0].
    Stores v over dx; first element of v is 1 and is not stored.
    Stores beta over dx0.
    Stores tau.

    The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
    are computed outside the routine and passed to it in dxnorm (array on the GPU).
*******************************************************************************/
extern "C" void
magma_zlarfgx_gpu(
    magma_int_t n,
    magmaDoubleComplex_ptr dx0,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex_ptr dtau,
    magmaDouble_ptr        dxnorm,
    magmaDoubleComplex_ptr dA, magma_int_t iter,
    magma_queue_t queue )
{
    dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
    dim3 threads( BLOCK_SIZE );

    magma_zlarfgx_gpu_kernel
        <<< blocks, threads, 0, queue->cuda_stream() >>>
        ( n, dx0, dx, dtau, dxnorm, dA, iter);
}


/***************************************************************************//**
    Generates Householder elementary reflector H = I - tau v v^T to reduce
        H [ dx0 ] = [ beta ]
          [ dx  ]   [ 0    ]
    with |beta| = norm( [dx0, dx] ) = dxnorm[0].
    Stores v over dx; first element of v is 1 and is not stored.
    Stores beta over dx0.
    Stores tau.

    The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
    are computed outside the routine and passed to it in dxnorm (array on the GPU).
*******************************************************************************/
extern "C" void
magma_zlarfgtx_gpu(
    magma_int_t n,
    magmaDoubleComplex_ptr dx0,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex_ptr dtau,
    magmaDouble_ptr        dxnorm,
    magmaDoubleComplex_ptr dA, magma_int_t iter,
    magmaDoubleComplex_ptr V,  magma_int_t ldv,
    magmaDoubleComplex_ptr T,  magma_int_t ldt,
    magmaDoubleComplex_ptr dwork,
    magma_queue_t queue )
{
    /* Generate the elementary reflector H(iter) */
    magma_zlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter, queue);

    if (iter == 0) {
        magmaDoubleComplex tt = MAGMA_Z_ONE;
        magmablas_zlacpy( MagmaFull, 1, 1, dtau, 1, T+iter+iter*ldt, 1, queue );
        magma_zsetmatrix( 1, 1, &tt, 1, dx0, 1, queue );
    }
    else {
        /* Compute the iter-th column of T */
        magma_zgemv_kernel3
            <<< iter, BLOCK_SIZE, 0, queue->cuda_stream() >>>
            ( n, V, ldv, dx0, dwork, dtau );

        magma_ztrmv_kernel2
            <<< iter, iter, 0, queue->cuda_stream() >>>
            ( T, ldt, dwork, T+iter*ldt, dtau );
    }
}
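For reference, here is a minimal host-side sketch of the same Householder computation for the real, double-precision case, mirroring the REAL branch of the kernel above. The function name and use of std::vector are illustrative assumptions, not MAGMA API.

#include <cmath>
#include <vector>

// Given alpha = x0 and the trailing vector x, compute beta and tau and overwrite x with v
// (v[0] == 1 is implicit), so that (I - tau*v*v^T) * [x0; x] = [beta; 0; ...; 0].
// Assumes [x0; x] is not the zero vector (the kernel handles that case separately).
static void larfg_reference(double alpha, std::vector<double>& x, double& beta, double& tau)
{
    double sumsq = alpha * alpha;
    for (double xi : x) sumsq += xi * xi;
    double xnorm = std::sqrt(sumsq);          // norm( [x0, x] ), passed in as dxnorm on the GPU
    beta = -std::copysign(xnorm, alpha);      // same sign convention as the kernel
    tau  = (beta - alpha) / beta;
    double scale = 1.0 / (alpha - beta);      // kernel: scale = 1. / (alpha - beta)
    for (double& xi : x) xi *= scale;         // kernel: dx[j] = dxi * scale
}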
13884e80246b9540acb4542e28ab3bfb215e7655.hip
// !!! This is a file automatically generated by hipify!!! //#include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> //#include "device_launch_parameters.h" #define clip(minv, maxv, value) ((value)<minv) ? minv : (((value)>maxv) ? maxv : (value)) __global__ void kernel_scale( float *scale, const unsigned char *src, int src_Width, int src_Height, int scale_step) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; int c_step=((src_Width)>>scale_step); int sx = (x<<scale_step); int sy = (y<<scale_step); int src_step = src_Width*4; const unsigned char *pS; if(x >= (src_Width>>scale_step) || y >= (src_Height>>scale_step)) return; { unsigned char R, G, B; pS = src + (sy*src_step + sx*4); B = pS[0] ;//= 128;//clip(0, 255, (src[src_y*src_step + src_x*3] * exposure[y*dst_Width + x])); G = pS[1];// = 128;//clip(0, 255, (src[src_y*src_step + src_x*3 +1] * exposure[y*dst_Width + x])); R = pS[2];// = 128;//clip(0, 255, (src[src_y*src_step + src_x*3 +2] * exposure[y*dst_Width + x])); //if((y&c_mask)==0 && (x&c_mask)==0) { scale[y*c_step+x] = 0.299*R + 0.587*G + 0.114*B; } } } #define DESCALE(x, n) (((x) + (1 << ((n)-1)))>>(n)) #define COEFFS_0 (22987) #define COEFFS_1 (-11698) #define COEFFS_2 (-5636) #define COEFFS_3 (29049) __global__ void kernel_uyvy2bgr( unsigned char *dst, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= (width>>1) || y >= height) return; { int Y1, Y2, U, V; int r, g, b; int si = y*width*2 + x*4; int di = y*width*3 + x*6; U = src[si+0]; Y1 = src[si+1]; V = src[si+2]; Y2 = src[si+3]; b = DESCALE((U - 128)*COEFFS_3, 14); g = DESCALE((U - 128)*COEFFS_2 + (V - 128)*COEFFS_1, 14); r = DESCALE((V - 128)*COEFFS_0, 14); dst[di+0] = clip(0, 255, Y1 + b);//B dst[di+1] = clip(0, 255, Y1 + g);//G dst[di+2] = clip(0, 255, Y1 + r);//R dst[di+3] = clip(0, 255, Y2 + b);//B dst[di+4] = clip(0, 255, Y2 + g);//G dst[di+5] = clip(0, 255, Y2 + r);//R } } __global__ void kernel_yuyv2bgr( unsigned char *dst, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= (width>>1) || y >= height) return; { int Y1, Y2, U, V; int r, g, b; int si = y*width*2 + x*4; int di = y*width*3 + x*6; Y1 = src[si+0]; U = src[si+1]; Y2 = src[si+2]; V = src[si+3]; b = DESCALE((U - 128)*COEFFS_3, 14); g = DESCALE((U - 128)*COEFFS_2 + (V - 128)*COEFFS_1, 14); r = DESCALE((V - 128)*COEFFS_0, 14); dst[di+0] = clip(0, 255, Y1 + b);//B dst[di+1] = clip(0, 255, Y1 + g);//G dst[di+2] = clip(0, 255, Y1 + r);//R dst[di+3] = clip(0, 255, Y2 + b);//B dst[di+4] = clip(0, 255, Y2 + g);//G dst[di+5] = clip(0, 255, Y2 + r);//R } } __device__ void deInterlace(unsigned char top, unsigned char bot, unsigned char mid, unsigned char *dst) { *dst = clip(0, 255, (int)top + bot + mid- min(min(top, bot), mid) - max(max(top, bot), mid)); } __device__ void deInterlaceUV(int top, int bot, int mid, unsigned char *dst) { *dst = clip(0, 255, top + bot + mid- min(min(top, bot), mid) - max(max(top, bot), mid)); } __device__ void deInterlaceY(unsigned char top[], unsigned char bot[], unsigned char mid, unsigned char *dst) { const int thred = 50; int grd = abs(top[0] - mid) + abs(top[1] - mid) + abs(top[2] - mid) + abs(bot[0] - mid) + abs(bot[1] - mid) + abs(bot[2] - mid); if(grd > thred){ int grda = abs(top[0] - bot[2]); int grdb = abs(top[1] - bot[1]); int grdc = abs(top[2] - bot[0]); if( (grda 
< grdb) && (grda < grdc) ) { *dst = top[0] + bot[2] + mid -min(min(top[0], bot[2]), mid) - max(max(top[0], bot[2]), mid);//medthr(a_1, b1, d); } else if( (grdc < grda) && (grdc < grdb) ) { *dst = top[2] + bot[0] + mid - min(min(top[2], bot[0]), mid) - max(max(top[2], bot[0]), mid);//medthr(a1, b_1, d); } else { *dst = top[1] + bot[1] + mid- min(min(top[1], bot[1]), mid) - max(max(top[1], bot[1]), mid);//medthr(a, b, d); } } else { *dst = mid; } } #if 0 __global__ void kernel_dei( unsigned char *src, int width, int height) { //dei int x = blockIdx.x * (blockDim.x ) + (threadIdx.x ); int y = blockIdx.y * (blockDim.y ) + (threadIdx.y); if((x+1) >= width || (y+1) >= height) return; //return; width = width * 2; x = x*2;//x*4; y = y*2; int offset_top = (y + 0) * width + x; //top point int offset_mid = (y + 1) * width + x; //mid point int offset_bot = (y + 2) * width + x; //bot point unsigned char topY[3], botY[3], midY; unsigned char topUV, botUV, midUV; topUV = src[offset_top + 0]; //U or V botUV = src[offset_bot + 0]; midUV = src[offset_mid + 0]; topY[0] = src[offset_top - 1]; //Y topY[1] = src[offset_top + 1]; topY[2] = src[offset_top + 3]; botY[0] = src[offset_bot - 1]; botY[1] = src[offset_bot + 1]; botY[2] = src[offset_bot + 3]; midY = src[offset_mid + 1]; //select the middle pixel to replace the mid one //deInterlaceUV(topUV, botUV , midUV, &src[offset_mid + 0]); deInterlaceY(topY, botY , midY, &src[offset_mid + 1]); } #else __global__ void kernel_dei(unsigned char *src, int width, int height) { //dei int x = blockIdx.x * (blockDim.x ) + (threadIdx.x ); int y = blockIdx.y * (blockDim.y ) + (threadIdx.y); if((x+1) >= width || (y+1) >= height) return; //return; width = width * 2; x = x * 4; y = y*2; int offset_top = (y + 0) * width + x; //top point int offset_mid = (y + 1) * width + x; //mid point int offset_bot = (y + 2) * width + x; //bot point unsigned char top[4], bot[4], mid[4]; top[0] = src[offset_top + 0]; //U bot[0] = src[offset_bot + 0]; mid[0] = src[offset_mid + 0]; top[1] = src[offset_top + 1]; //Y bot[1] = src[offset_bot + 1]; mid[1] = src[offset_mid + 1]; top[2] = src[offset_top + 2]; //V bot[2] = src[offset_bot + 2]; mid[2] = src[offset_mid + 2]; top[3] = src[offset_top + 3]; //Y bot[3] = src[offset_bot + 3]; mid[3] = src[offset_mid + 3]; //select the middle pixel to replace the mid one // deInterlace(top[0], bot[0] , mid[0], &src[offset_mid + 0]); deInterlace(top[1], bot[1] , mid[1], &src[offset_mid + 1]); // deInterlace(top[2], bot[2] , mid[2], &src[offset_mid + 2]); deInterlace(top[3], bot[3] , mid[3], &src[offset_mid + 3]); } #endif extern "C" int uyvydei_( unsigned char *dst, int width, int height) { dim3 block((width/2+31)/32,(height/2+31)/32); dim3 thread(32, 32); //dim3 block((dst_Width+127)/128,(dst_Height+127)/128); //dim3 thread(128, 128); hipLaunchKernelGGL(( kernel_dei), dim3(block), dim3(thread), 0, 0, dst, width, height); return 0; } #if 1 extern "C" int uyvy2bgr_( unsigned char *dst, const unsigned char *src, int width, int height, hipStream_t stream) { //dim3 block((width/2+15)/16, (height+63)/64); //dim3 thread(16, 64); dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); //dim3 block((width/2+127)/128,(height+127)/128); //dim3 thread(128, 128); //kernel_uyvy2bgr_and_sphere_tp_erect<<<block, thread>>>(dst, src, width, height); hipLaunchKernelGGL(( kernel_uyvy2bgr), dim3(block), dim3(thread), 0, stream, dst, src, width, height); return 0; } extern "C" int yuyv2bgr_( unsigned char *dst, const unsigned char *src, int width, int height, hipStream_t 
stream) { //dim3 block((width/2+15)/16, (height+63)/64); //dim3 thread(16, 64); dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); //dim3 block((width/2+127)/128,(height+127)/128); //dim3 thread(128, 128); //kernel_uyvy2bgr_and_sphere_tp_erect<<<block, thread>>>(dst, src, width, height); hipLaunchKernelGGL(( kernel_yuyv2bgr), dim3(block), dim3(thread), 0, stream, dst, src, width, height); return 0; } #else extern "C" int uyvy2bgr_( unsigned char *dst, const unsigned char *src, int width, int height) { dim3 block((width/2+15)/16, (height+63)/64); dim3 thread(16, 64); hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>(); hipBindTexture2D(NULL, texIn, src, desc, width*2, height, width*2); hipLaunchKernelGGL(( kernel_uyvy2bgr), dim3(block), dim3(thread), 0, 0, dst, src, width, height); hipUnbindTexture(texIn); return 0; } #endif extern "C" int kernel_scale_( float *scale, const unsigned char *src, int width, int height, int scale_step) { dim3 block(((width>>scale_step)+31)/32,((height>>scale_step)+31)/32); dim3 thread(32, 32); //dim3 block((dst_Width+127)/128,(dst_Height+127)/128); //dim3 thread(128, 128); hipLaunchKernelGGL(( kernel_scale), dim3(block), dim3(thread), 0, 0, scale, src, width, height, scale_step); return 0; } __global__ void kernel_yuyv2bgr_ext( unsigned char *dst, const unsigned char *src, unsigned char *gray, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= (width>>1) || y >= height) return; { int Y1, Y2, U, V; int r, g, b; int si = y*width*2 + x*4; int di = y*width*3 + x*6; Y1 = src[si+0]; U = src[si+1]; Y2 = src[si+2]; V = src[si+3]; gray[y*width + x*2 + 0] = Y1; gray[y*width + x*2 + 1] = Y2; b = DESCALE((U - 128)*COEFFS_3, 14); g = DESCALE((U - 128)*COEFFS_2 + (V - 128)*COEFFS_1, 14); r = DESCALE((V - 128)*COEFFS_0, 14); dst[di+0] = clip(0, 255, Y1 + b);//B dst[di+1] = clip(0, 255, Y1 + g);//G dst[di+2] = clip(0, 255, Y1 + r);//R dst[di+3] = clip(0, 255, Y2 + b);//B dst[di+4] = clip(0, 255, Y2 + g);//G dst[di+5] = clip(0, 255, Y2 + r);//R } } extern "C" int yuyv2bgr_ext_( unsigned char *dst, const unsigned char *src, unsigned char *gray, int width, int height, hipStream_t stream) { //dim3 block((width/2+15)/16, (height+63)/64); //dim3 thread(16, 64); dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); //dim3 block((width/2+127)/128,(height+127)/128); //dim3 thread(128, 128); //kernel_uyvy2bgr_and_sphere_tp_erect<<<block, thread>>>(dst, src, width, height); hipLaunchKernelGGL(( kernel_yuyv2bgr_ext), dim3(block), dim3(thread), 0, stream, dst, src, gray, width, height); return 0; } __global__ void kernel_yuyv2gray( unsigned char *dst, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= width || y >= height) return; dst[y*width + x] = src[y*width*2 + x*2]; } extern "C" int yuyv2gray_( unsigned char *dst, const unsigned char *src, int width, int height, hipStream_t stream) { dim3 block((width+31)/32,(height+31)/32); dim3 thread(32, 32); hipLaunchKernelGGL(( kernel_yuyv2gray), dim3(block), dim3(thread), 0, stream, dst, src, width, height); return 0; } __global__ void kernel_yuyv2yuvplan( unsigned char *dsty, unsigned char *dstu, unsigned char *dstv, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int uvwidth = (width>>1); if(x >= (width>>1) || y >= height) return; 
dsty[y*width + 2*x] = src[y*width*2 + x*4]; dstu[y*uvwidth + x] = src[y*width*2 + x*4+1]; dsty[y*width + 2*x+1] = src[y*width*2 + x*4+2]; dstv[y*uvwidth + x] = src[y*width*2 + x*4+3]; } extern "C" int yuyv2yuvplan_( unsigned char *dsty, unsigned char *dstu,unsigned char *dstv,const unsigned char *src, int width, int height, hipStream_t stream) { dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); hipLaunchKernelGGL(( kernel_yuyv2yuvplan), dim3(block), dim3(thread), 0, stream, dsty,dstu,dstv, src, width, height); return 0; }
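A minimal host-side usage sketch for the HIP build above; the frame dimensions, buffer names and the absence of error checking are assumptions made for illustration.

#include <hip/hip_runtime.h>

extern "C" int yuyv2bgr_(unsigned char *dst, const unsigned char *src,
                         int width, int height, hipStream_t stream);

void convert_one_frame(const unsigned char *yuyv_host, unsigned char *bgr_host,
                       int width, int height)
{
    unsigned char *src_dev = nullptr, *dst_dev = nullptr;
    hipMalloc(&src_dev, width * height * 2);   // YUYV: 2 bytes per pixel
    hipMalloc(&dst_dev, width * height * 3);   // BGR:  3 bytes per pixel

    hipStream_t stream;
    hipStreamCreate(&stream);

    hipMemcpyAsync(src_dev, yuyv_host, width * height * 2, hipMemcpyHostToDevice, stream);
    yuyv2bgr_(dst_dev, src_dev, width, height, stream);   // launches kernel_yuyv2bgr on `stream`
    hipMemcpyAsync(bgr_host, dst_dev, width * height * 3, hipMemcpyDeviceToHost, stream);
    hipStreamSynchronize(stream);

    hipStreamDestroy(stream);
    hipFree(src_dev);
    hipFree(dst_dev);
}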
13884e80246b9540acb4542e28ab3bfb215e7655.cu
//#include <stdio.h> #include <assert.h> #include <cuda_runtime.h> //#include "device_launch_parameters.h" #define clip(minv, maxv, value) ((value)<minv) ? minv : (((value)>maxv) ? maxv : (value)) __global__ void kernel_scale( float *scale, const unsigned char *src, int src_Width, int src_Height, int scale_step) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; int c_step=((src_Width)>>scale_step); int sx = (x<<scale_step); int sy = (y<<scale_step); int src_step = src_Width*4; const unsigned char *pS; if(x >= (src_Width>>scale_step) || y >= (src_Height>>scale_step)) return; { unsigned char R, G, B; pS = src + (sy*src_step + sx*4); B = pS[0] ;//= 128;//clip(0, 255, (src[src_y*src_step + src_x*3] * exposure[y*dst_Width + x])); G = pS[1];// = 128;//clip(0, 255, (src[src_y*src_step + src_x*3 +1] * exposure[y*dst_Width + x])); R = pS[2];// = 128;//clip(0, 255, (src[src_y*src_step + src_x*3 +2] * exposure[y*dst_Width + x])); //if((y&c_mask)==0 && (x&c_mask)==0) { scale[y*c_step+x] = 0.299*R + 0.587*G + 0.114*B; } } } #define DESCALE(x, n) (((x) + (1 << ((n)-1)))>>(n)) #define COEFFS_0 (22987) #define COEFFS_1 (-11698) #define COEFFS_2 (-5636) #define COEFFS_3 (29049) __global__ void kernel_uyvy2bgr( unsigned char *dst, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= (width>>1) || y >= height) return; { int Y1, Y2, U, V; int r, g, b; int si = y*width*2 + x*4; int di = y*width*3 + x*6; U = src[si+0]; Y1 = src[si+1]; V = src[si+2]; Y2 = src[si+3]; b = DESCALE((U - 128)*COEFFS_3, 14); g = DESCALE((U - 128)*COEFFS_2 + (V - 128)*COEFFS_1, 14); r = DESCALE((V - 128)*COEFFS_0, 14); dst[di+0] = clip(0, 255, Y1 + b);//B dst[di+1] = clip(0, 255, Y1 + g);//G dst[di+2] = clip(0, 255, Y1 + r);//R dst[di+3] = clip(0, 255, Y2 + b);//B dst[di+4] = clip(0, 255, Y2 + g);//G dst[di+5] = clip(0, 255, Y2 + r);//R } } __global__ void kernel_yuyv2bgr( unsigned char *dst, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= (width>>1) || y >= height) return; { int Y1, Y2, U, V; int r, g, b; int si = y*width*2 + x*4; int di = y*width*3 + x*6; Y1 = src[si+0]; U = src[si+1]; Y2 = src[si+2]; V = src[si+3]; b = DESCALE((U - 128)*COEFFS_3, 14); g = DESCALE((U - 128)*COEFFS_2 + (V - 128)*COEFFS_1, 14); r = DESCALE((V - 128)*COEFFS_0, 14); dst[di+0] = clip(0, 255, Y1 + b);//B dst[di+1] = clip(0, 255, Y1 + g);//G dst[di+2] = clip(0, 255, Y1 + r);//R dst[di+3] = clip(0, 255, Y2 + b);//B dst[di+4] = clip(0, 255, Y2 + g);//G dst[di+5] = clip(0, 255, Y2 + r);//R } } __device__ void deInterlace(unsigned char top, unsigned char bot, unsigned char mid, unsigned char *dst) { *dst = clip(0, 255, (int)top + bot + mid- min(min(top, bot), mid) - max(max(top, bot), mid)); } __device__ void deInterlaceUV(int top, int bot, int mid, unsigned char *dst) { *dst = clip(0, 255, top + bot + mid- min(min(top, bot), mid) - max(max(top, bot), mid)); } __device__ void deInterlaceY(unsigned char top[], unsigned char bot[], unsigned char mid, unsigned char *dst) { const int thred = 50; int grd = abs(top[0] - mid) + abs(top[1] - mid) + abs(top[2] - mid) + abs(bot[0] - mid) + abs(bot[1] - mid) + abs(bot[2] - mid); if(grd > thred){ int grda = abs(top[0] - bot[2]); int grdb = abs(top[1] - bot[1]); int grdc = abs(top[2] - bot[0]); if( (grda < grdb) && (grda < grdc) ) { *dst = top[0] + bot[2] + mid 
-min(min(top[0], bot[2]), mid) - max(max(top[0], bot[2]), mid);//medthr(a_1, b1, d); } else if( (grdc < grda) && (grdc < grdb) ) { *dst = top[2] + bot[0] + mid - min(min(top[2], bot[0]), mid) - max(max(top[2], bot[0]), mid);//medthr(a1, b_1, d); } else { *dst = top[1] + bot[1] + mid- min(min(top[1], bot[1]), mid) - max(max(top[1], bot[1]), mid);//medthr(a, b, d); } } else { *dst = mid; } } #if 0 __global__ void kernel_dei( unsigned char *src, int width, int height) { //dei int x = blockIdx.x * (blockDim.x ) + (threadIdx.x ); int y = blockIdx.y * (blockDim.y ) + (threadIdx.y); if((x+1) >= width || (y+1) >= height) return; //return; width = width * 2; x = x*2;//x*4; y = y*2; int offset_top = (y + 0) * width + x; //top point int offset_mid = (y + 1) * width + x; //mid point int offset_bot = (y + 2) * width + x; //bot point unsigned char topY[3], botY[3], midY; unsigned char topUV, botUV, midUV; topUV = src[offset_top + 0]; //U or V botUV = src[offset_bot + 0]; midUV = src[offset_mid + 0]; topY[0] = src[offset_top - 1]; //Y topY[1] = src[offset_top + 1]; topY[2] = src[offset_top + 3]; botY[0] = src[offset_bot - 1]; botY[1] = src[offset_bot + 1]; botY[2] = src[offset_bot + 3]; midY = src[offset_mid + 1]; //select the middle pixel to replace the mid one //deInterlaceUV(topUV, botUV , midUV, &src[offset_mid + 0]); deInterlaceY(topY, botY , midY, &src[offset_mid + 1]); } #else __global__ void kernel_dei(unsigned char *src, int width, int height) { //dei int x = blockIdx.x * (blockDim.x ) + (threadIdx.x ); int y = blockIdx.y * (blockDim.y ) + (threadIdx.y); if((x+1) >= width || (y+1) >= height) return; //return; width = width * 2; x = x * 4; y = y*2; int offset_top = (y + 0) * width + x; //top point int offset_mid = (y + 1) * width + x; //mid point int offset_bot = (y + 2) * width + x; //bot point unsigned char top[4], bot[4], mid[4]; top[0] = src[offset_top + 0]; //U bot[0] = src[offset_bot + 0]; mid[0] = src[offset_mid + 0]; top[1] = src[offset_top + 1]; //Y bot[1] = src[offset_bot + 1]; mid[1] = src[offset_mid + 1]; top[2] = src[offset_top + 2]; //V bot[2] = src[offset_bot + 2]; mid[2] = src[offset_mid + 2]; top[3] = src[offset_top + 3]; //Y bot[3] = src[offset_bot + 3]; mid[3] = src[offset_mid + 3]; //select the middle pixel to replace the mid one // deInterlace(top[0], bot[0] , mid[0], &src[offset_mid + 0]); deInterlace(top[1], bot[1] , mid[1], &src[offset_mid + 1]); // deInterlace(top[2], bot[2] , mid[2], &src[offset_mid + 2]); deInterlace(top[3], bot[3] , mid[3], &src[offset_mid + 3]); } #endif extern "C" int uyvydei_( unsigned char *dst, int width, int height) { dim3 block((width/2+31)/32,(height/2+31)/32); dim3 thread(32, 32); //dim3 block((dst_Width+127)/128,(dst_Height+127)/128); //dim3 thread(128, 128); kernel_dei<<<block, thread>>>(dst, width, height); return 0; } #if 1 extern "C" int uyvy2bgr_( unsigned char *dst, const unsigned char *src, int width, int height, cudaStream_t stream) { //dim3 block((width/2+15)/16, (height+63)/64); //dim3 thread(16, 64); dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); //dim3 block((width/2+127)/128,(height+127)/128); //dim3 thread(128, 128); //kernel_uyvy2bgr_and_sphere_tp_erect<<<block, thread>>>(dst, src, width, height); kernel_uyvy2bgr<<<block, thread, 0, stream>>>(dst, src, width, height); return 0; } extern "C" int yuyv2bgr_( unsigned char *dst, const unsigned char *src, int width, int height, cudaStream_t stream) { //dim3 block((width/2+15)/16, (height+63)/64); //dim3 thread(16, 64); dim3 block((width/2+31)/32,(height+31)/32); 
dim3 thread(32, 32); //dim3 block((width/2+127)/128,(height+127)/128); //dim3 thread(128, 128); //kernel_uyvy2bgr_and_sphere_tp_erect<<<block, thread>>>(dst, src, width, height); kernel_yuyv2bgr<<<block, thread, 0, stream>>>(dst, src, width, height); return 0; } #else extern "C" int uyvy2bgr_( unsigned char *dst, const unsigned char *src, int width, int height) { dim3 block((width/2+15)/16, (height+63)/64); dim3 thread(16, 64); cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); cudaBindTexture2D(NULL, texIn, src, desc, width*2, height, width*2); kernel_uyvy2bgr<<<block, thread>>>(dst, src, width, height); cudaUnbindTexture(texIn); return 0; } #endif extern "C" int kernel_scale_( float *scale, const unsigned char *src, int width, int height, int scale_step) { dim3 block(((width>>scale_step)+31)/32,((height>>scale_step)+31)/32); dim3 thread(32, 32); //dim3 block((dst_Width+127)/128,(dst_Height+127)/128); //dim3 thread(128, 128); kernel_scale<<<block, thread>>>(scale, src, width, height, scale_step); return 0; } __global__ void kernel_yuyv2bgr_ext( unsigned char *dst, const unsigned char *src, unsigned char *gray, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= (width>>1) || y >= height) return; { int Y1, Y2, U, V; int r, g, b; int si = y*width*2 + x*4; int di = y*width*3 + x*6; Y1 = src[si+0]; U = src[si+1]; Y2 = src[si+2]; V = src[si+3]; gray[y*width + x*2 + 0] = Y1; gray[y*width + x*2 + 1] = Y2; b = DESCALE((U - 128)*COEFFS_3, 14); g = DESCALE((U - 128)*COEFFS_2 + (V - 128)*COEFFS_1, 14); r = DESCALE((V - 128)*COEFFS_0, 14); dst[di+0] = clip(0, 255, Y1 + b);//B dst[di+1] = clip(0, 255, Y1 + g);//G dst[di+2] = clip(0, 255, Y1 + r);//R dst[di+3] = clip(0, 255, Y2 + b);//B dst[di+4] = clip(0, 255, Y2 + g);//G dst[di+5] = clip(0, 255, Y2 + r);//R } } extern "C" int yuyv2bgr_ext_( unsigned char *dst, const unsigned char *src, unsigned char *gray, int width, int height, cudaStream_t stream) { //dim3 block((width/2+15)/16, (height+63)/64); //dim3 thread(16, 64); dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); //dim3 block((width/2+127)/128,(height+127)/128); //dim3 thread(128, 128); //kernel_uyvy2bgr_and_sphere_tp_erect<<<block, thread>>>(dst, src, width, height); kernel_yuyv2bgr_ext<<<block, thread, 0, stream>>>(dst, src, gray, width, height); return 0; } __global__ void kernel_yuyv2gray( unsigned char *dst, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if(x >= width || y >= height) return; dst[y*width + x] = src[y*width*2 + x*2]; } extern "C" int yuyv2gray_( unsigned char *dst, const unsigned char *src, int width, int height, cudaStream_t stream) { dim3 block((width+31)/32,(height+31)/32); dim3 thread(32, 32); kernel_yuyv2gray<<<block, thread, 0, stream>>>(dst, src, width, height); return 0; } __global__ void kernel_yuyv2yuvplan( unsigned char *dsty, unsigned char *dstu, unsigned char *dstv, const unsigned char *src, int width, int height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int uvwidth = (width>>1); if(x >= (width>>1) || y >= height) return; dsty[y*width + 2*x] = src[y*width*2 + x*4]; dstu[y*uvwidth + x] = src[y*width*2 + x*4+1]; dsty[y*width + 2*x+1] = src[y*width*2 + x*4+2]; dstv[y*uvwidth + x] = src[y*width*2 + x*4+3]; } extern "C" int yuyv2yuvplan_( unsigned char *dsty, unsigned char *dstu,unsigned char *dstv,const 
unsigned char *src, int width, int height, cudaStream_t stream) { dim3 block((width/2+31)/32,(height+31)/32); dim3 thread(32, 32); kernel_yuyv2yuvplan<<<block, thread, 0, stream>>>(dsty,dstu,dstv, src, width, height); return 0; }
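The Q14 fixed-point coefficients fed to DESCALE in this file are rounded BT.601 constants (22987/16384 ≈ 1.403, 29049/16384 ≈ 1.773, and so on). A small CPU reference with made-up sample values shows the equivalent floating-point conversion for one YUYV macropixel.

#include <algorithm>
#include <cstdio>

static unsigned char clamp_u8(float v) { return (unsigned char)std::min(255.0f, std::max(0.0f, v)); }

int main()
{
    unsigned char Y1 = 120, U = 90, V = 200, Y2 = 130;   // one YUYV macropixel (sample values)
    float b = 1.773f * (U - 128);                        // ~ 29049 / 16384
    float g = -0.344f * (U - 128) - 0.714f * (V - 128);  // ~ -5636 / 16384 and -11698 / 16384
    float r = 1.403f * (V - 128);                        // ~ 22987 / 16384
    std::printf("pixel 1: B=%d G=%d R=%d\n", clamp_u8(Y1 + b), clamp_u8(Y1 + g), clamp_u8(Y1 + r));
    std::printf("pixel 2: B=%d G=%d R=%d\n", clamp_u8(Y2 + b), clamp_u8(Y2 + g), clamp_u8(Y2 + r));
    return 0;
}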
31d4f6a246f07cf1cf791d6f5788cb7d3d801d01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* .cuda.cu - Copyright 2019/2020 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include ".cuda.h" namespace lh2core { // path tracing buffers and global variables __constant__ CoreInstanceDesc* instanceDescriptors; __constant__ CUDAMaterial* materials; __constant__ CoreLightTri* triLights; __constant__ CorePointLight* pointLights; __constant__ CoreSpotLight* spotLights; __constant__ CoreDirectionalLight* directionalLights; __constant__ int4 lightCounts; // area, point, spot, directional __constant__ uchar4* argb32; __constant__ float4* argb128; __constant__ uchar4* nrm32; __constant__ float4* skyPixels; __constant__ int skywidth; __constant__ int skyheight; __constant__ float4* debugData; __constant__ LightCluster* lightTree; __constant__ mat4 worldToSky; // path tracer settings __constant__ __device__ float geometryEpsilon; __constant__ __device__ float clampValue; // staging: copies will be batched and carried out after rendering completes, // to allow the CPU to update the scene concurrently with GPU rendering. enum { INSTS = 0, MATS, TLGHTS, PLGHTS, SLGHTS, DLGHTS, LCNTS, RGB32, RGBH, NRMLS, SKYPIX, SKYW, SKYH, SMAT, DBGDAT, GEPS, CLMPV, LTREE }; // device pointers are not real pointers for nvcc, so we need a bit of a hack. 
struct StagedPtr { void* p; int id; }; struct StagedInt { int v; int id; }; struct StagedInt4 { int4 v; int id; }; struct StagedMat { mat4 v; int id; }; struct StagedF32 { float v; int id; }; struct StagedCpy { void* d; void* s; int n; }; static std::vector<StagedPtr> stagedPtr; static std::vector<StagedInt> stagedInt; static std::vector<StagedInt4> stagedInt4; static std::vector<StagedMat> stagedMat; static std::vector<StagedF32> stagedF32; static std::vector<StagedCpy> stagedCpy; __host__ static void pushPtrCpy( int id, void* p ) { if (id == INSTS) hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); if (id == MATS) hipMemcpyToSymbol( materials, &p, sizeof( void* ) ); if (id == TLGHTS) hipMemcpyToSymbol( triLights, &p, sizeof( void* ) ); if (id == PLGHTS) hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); if (id == SLGHTS) hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); if (id == DLGHTS) hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); if (id == RGB32) hipMemcpyToSymbol( argb32, &p, sizeof( void* ) ); if (id == RGBH) hipMemcpyToSymbol( argb128, &p, sizeof( void* ) ); if (id == NRMLS) hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); if (id == SKYPIX) hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); if (id == DBGDAT) hipMemcpyToSymbol( debugData, &p, sizeof( void* ) ); if (id == LTREE) hipMemcpyToSymbol( lightTree, &p, sizeof( void* ) ); } __host__ static void pushIntCpy( int id, const int v ) { if (id == SKYW) hipMemcpyToSymbol( skywidth, &v, sizeof( int ) ); if (id == SKYH) hipMemcpyToSymbol( skyheight, &v, sizeof( int ) ); } __host__ static void pushF32Cpy( int id, const float v ) { if (id == GEPS) hipMemcpyToSymbol( geometryEpsilon, &v, sizeof( float ) ); if (id == CLMPV) hipMemcpyToSymbol( clampValue, &v, sizeof( int ) ); } __host__ static void pushMatCpy( int id, const mat4& m ) { if (id == SMAT) hipMemcpyToSymbol( worldToSky, &m, sizeof( mat4 ) ); } __host__ static void pushInt4Cpy( int id, const int4& v ) { if (id == LCNTS) hipMemcpyToSymbol( lightCounts, &v, sizeof( int4 ) ); } #define MAXVARS 32 static void* prevPtr[MAXVARS] = {}; static int prevInt[MAXVARS] = {}; static float prevFloat[MAXVARS] = {}; static int4 prevInt4[MAXVARS] = {}; static bool prevValSet[MAXVARS] = {}; __host__ static void stagePtrCpy( int id, void* p ) { if (prevPtr[id] == p) return; // not changed StagedPtr n = { p, id }; stagedPtr.push_back( n ); prevPtr[id] = p; } __host__ static void stageIntCpy( int id, const int v ) { if (prevValSet[id] == true && prevInt[id] == v) return; StagedInt n = { v, id }; stagedInt.push_back( n ); prevValSet[id] = true; prevInt[id] = v; } __host__ static void stageF32Cpy( int id, const float v ) { if (prevValSet[id] == true && prevFloat[id] == v) return; StagedF32 n = { v, id }; stagedF32.push_back( n ); prevValSet[id] = true; prevFloat[id] = v; } __host__ static void stageMatCpy( int id, const mat4& m ) { StagedMat n = { m, id }; stagedMat.push_back( n ); } __host__ static void stageInt4Cpy( int id, const int4& v ) { if (prevValSet[id] == true && prevInt4[id].x == v.x && prevInt4[id].y == v.y && prevInt4[id].z == v.z && prevInt4[id].w == v.w) return; StagedInt4 n = { v, id }; stagedInt4.push_back( n ); prevValSet[id] = true; prevInt4[id] = v; } __host__ void stageMemcpy( void* d, void* s, int n ) { StagedCpy c = { d, s, n }; stagedCpy.push_back( c ); } __host__ void stageInstanceDescriptors( CoreInstanceDesc* p ) { stagePtrCpy( INSTS /* instanceDescriptors */, p ); } __host__ void stageMaterialList( CUDAMaterial* p ) { stagePtrCpy( MATS /* 
materials */, p ); } __host__ void stageTriLights( CoreLightTri* p ) { stagePtrCpy( TLGHTS /* triLights */, p ); } __host__ void stagePointLights( CorePointLight* p ) { stagePtrCpy( PLGHTS /* pointLights */, p ); } __host__ void stageSpotLights( CoreSpotLight* p ) { stagePtrCpy( SLGHTS /* spotLights */, p ); } __host__ void stageDirectionalLights( CoreDirectionalLight* p ) { stagePtrCpy( DLGHTS /* directionalLights */, p ); } __host__ void stageARGB32Pixels( uint* p ) { stagePtrCpy( RGB32 /* argb32 */, p ); } __host__ void stageARGB128Pixels( float4* p ) { stagePtrCpy( RGBH /* argb128 */, p ); } __host__ void stageNRM32Pixels( uint* p ) { stagePtrCpy( NRMLS /* nrm32 */, p ); } __host__ void stageSkyPixels( float4* p ) { stagePtrCpy( SKYPIX /* skyPixels */, p ); } __host__ void stageSkySize( int w, int h ) { stageIntCpy( SKYW /* skywidth */, w ); stageIntCpy( SKYH /* skyheight */, h ); } __host__ void stageWorldToSky( const mat4& worldToLight ) { stageMatCpy( SMAT /* worldToSky */, worldToLight ); } __host__ void stageDebugData( float4* p ) { stagePtrCpy( DBGDAT /* debugData */, p ); } __host__ void stageGeometryEpsilon( float e ) { stageF32Cpy( GEPS /* geometryEpsilon */, e ); } __host__ void stageClampValue( float c ) { stageF32Cpy( CLMPV /* clampValue */, c ); } __host__ void stageLightTree( LightCluster* t ) { stagePtrCpy( LTREE /* light tree */, t ); } __host__ void stageLightCounts( int tri, int point, int spot, int directional ) { const int4 counts = make_int4( tri, point, spot, directional ); stageInt4Cpy( LCNTS /* lightCounts */, counts ); } __host__ void pushStagedCopies() { for (auto c : stagedCpy) hipMemcpy( c.d, c.s, c.n, hipMemcpyHostToDevice ); stagedCpy.clear(); for (auto n : stagedPtr) pushPtrCpy( n.id, n.p ); stagedPtr.clear(); for (auto n : stagedInt) pushIntCpy( n.id, n.v ); stagedInt.clear(); for (auto n : stagedInt4) pushInt4Cpy( n.id, n.v ); stagedInt4.clear(); for (auto n : stagedF32) pushF32Cpy( n.id, n.v ); stagedF32.clear(); for (auto n : stagedMat) pushMatCpy( n.id, n.v ); stagedMat.clear(); } // counters for persistent threads static __device__ Counters* counters; __global__ void InitCountersForExtend_Kernel( int pathCount ) { if (threadIdx.x != 0) return; counters->activePaths = pathCount; // remaining active paths counters->extensionRays = 0; // compaction counter for extension rays counters->shadowRays = 0; // compaction counter for connections counters->totalExtensionRays = pathCount; counters->totalShadowRays = 0; } __host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); } __global__ void InitCountersSubsequent_Kernel() { if (threadIdx.x != 0) return; counters->totalExtensionRays += counters->extensionRays; counters->activePaths = counters->extensionRays; // remaining active paths counters->extensionRays = 0; // compaction counter for extension rays } __host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); } __host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); } // functional blocks #include "tools_shared.h" #include "sampling_shared.h" #include "material_shared.h" #include "lights_shared.h" #include "bsdf.h" #include "camera.h" #include "pathtracer.h" #include "finalize_shared.h" #include "connections.h" } // namespace lh2core // EOF
31d4f6a246f07cf1cf791d6f5788cb7d3d801d01.cu
/* .cuda.cu - Copyright 2019/2020 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include ".cuda.h" namespace lh2core { // path tracing buffers and global variables __constant__ CoreInstanceDesc* instanceDescriptors; __constant__ CUDAMaterial* materials; __constant__ CoreLightTri* triLights; __constant__ CorePointLight* pointLights; __constant__ CoreSpotLight* spotLights; __constant__ CoreDirectionalLight* directionalLights; __constant__ int4 lightCounts; // area, point, spot, directional __constant__ uchar4* argb32; __constant__ float4* argb128; __constant__ uchar4* nrm32; __constant__ float4* skyPixels; __constant__ int skywidth; __constant__ int skyheight; __constant__ float4* debugData; __constant__ LightCluster* lightTree; __constant__ mat4 worldToSky; // path tracer settings __constant__ __device__ float geometryEpsilon; __constant__ __device__ float clampValue; // staging: copies will be batched and carried out after rendering completes, // to allow the CPU to update the scene concurrently with GPU rendering. enum { INSTS = 0, MATS, TLGHTS, PLGHTS, SLGHTS, DLGHTS, LCNTS, RGB32, RGBH, NRMLS, SKYPIX, SKYW, SKYH, SMAT, DBGDAT, GEPS, CLMPV, LTREE }; // device pointers are not real pointers for nvcc, so we need a bit of a hack. struct StagedPtr { void* p; int id; }; struct StagedInt { int v; int id; }; struct StagedInt4 { int4 v; int id; }; struct StagedMat { mat4 v; int id; }; struct StagedF32 { float v; int id; }; struct StagedCpy { void* d; void* s; int n; }; static std::vector<StagedPtr> stagedPtr; static std::vector<StagedInt> stagedInt; static std::vector<StagedInt4> stagedInt4; static std::vector<StagedMat> stagedMat; static std::vector<StagedF32> stagedF32; static std::vector<StagedCpy> stagedCpy; __host__ static void pushPtrCpy( int id, void* p ) { if (id == INSTS) cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); if (id == MATS) cudaMemcpyToSymbol( materials, &p, sizeof( void* ) ); if (id == TLGHTS) cudaMemcpyToSymbol( triLights, &p, sizeof( void* ) ); if (id == PLGHTS) cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); if (id == SLGHTS) cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); if (id == DLGHTS) cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); if (id == RGB32) cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) ); if (id == RGBH) cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) ); if (id == NRMLS) cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); if (id == SKYPIX) cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); if (id == DBGDAT) cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) ); if (id == LTREE) cudaMemcpyToSymbol( lightTree, &p, sizeof( void* ) ); } __host__ static void pushIntCpy( int id, const int v ) { if (id == SKYW) cudaMemcpyToSymbol( skywidth, &v, sizeof( int ) ); if (id == SKYH) cudaMemcpyToSymbol( skyheight, &v, sizeof( int ) ); } __host__ static void pushF32Cpy( int id, const float v ) { if (id == GEPS) cudaMemcpyToSymbol( geometryEpsilon, &v, sizeof( float ) ); if (id == CLMPV) cudaMemcpyToSymbol( clampValue, &v, sizeof( int ) ); } 
__host__ static void pushMatCpy( int id, const mat4& m ) { if (id == SMAT) cudaMemcpyToSymbol( worldToSky, &m, sizeof( mat4 ) ); } __host__ static void pushInt4Cpy( int id, const int4& v ) { if (id == LCNTS) cudaMemcpyToSymbol( lightCounts, &v, sizeof( int4 ) ); } #define MAXVARS 32 static void* prevPtr[MAXVARS] = {}; static int prevInt[MAXVARS] = {}; static float prevFloat[MAXVARS] = {}; static int4 prevInt4[MAXVARS] = {}; static bool prevValSet[MAXVARS] = {}; __host__ static void stagePtrCpy( int id, void* p ) { if (prevPtr[id] == p) return; // not changed StagedPtr n = { p, id }; stagedPtr.push_back( n ); prevPtr[id] = p; } __host__ static void stageIntCpy( int id, const int v ) { if (prevValSet[id] == true && prevInt[id] == v) return; StagedInt n = { v, id }; stagedInt.push_back( n ); prevValSet[id] = true; prevInt[id] = v; } __host__ static void stageF32Cpy( int id, const float v ) { if (prevValSet[id] == true && prevFloat[id] == v) return; StagedF32 n = { v, id }; stagedF32.push_back( n ); prevValSet[id] = true; prevFloat[id] = v; } __host__ static void stageMatCpy( int id, const mat4& m ) { StagedMat n = { m, id }; stagedMat.push_back( n ); } __host__ static void stageInt4Cpy( int id, const int4& v ) { if (prevValSet[id] == true && prevInt4[id].x == v.x && prevInt4[id].y == v.y && prevInt4[id].z == v.z && prevInt4[id].w == v.w) return; StagedInt4 n = { v, id }; stagedInt4.push_back( n ); prevValSet[id] = true; prevInt4[id] = v; } __host__ void stageMemcpy( void* d, void* s, int n ) { StagedCpy c = { d, s, n }; stagedCpy.push_back( c ); } __host__ void stageInstanceDescriptors( CoreInstanceDesc* p ) { stagePtrCpy( INSTS /* instanceDescriptors */, p ); } __host__ void stageMaterialList( CUDAMaterial* p ) { stagePtrCpy( MATS /* materials */, p ); } __host__ void stageTriLights( CoreLightTri* p ) { stagePtrCpy( TLGHTS /* triLights */, p ); } __host__ void stagePointLights( CorePointLight* p ) { stagePtrCpy( PLGHTS /* pointLights */, p ); } __host__ void stageSpotLights( CoreSpotLight* p ) { stagePtrCpy( SLGHTS /* spotLights */, p ); } __host__ void stageDirectionalLights( CoreDirectionalLight* p ) { stagePtrCpy( DLGHTS /* directionalLights */, p ); } __host__ void stageARGB32Pixels( uint* p ) { stagePtrCpy( RGB32 /* argb32 */, p ); } __host__ void stageARGB128Pixels( float4* p ) { stagePtrCpy( RGBH /* argb128 */, p ); } __host__ void stageNRM32Pixels( uint* p ) { stagePtrCpy( NRMLS /* nrm32 */, p ); } __host__ void stageSkyPixels( float4* p ) { stagePtrCpy( SKYPIX /* skyPixels */, p ); } __host__ void stageSkySize( int w, int h ) { stageIntCpy( SKYW /* skywidth */, w ); stageIntCpy( SKYH /* skyheight */, h ); } __host__ void stageWorldToSky( const mat4& worldToLight ) { stageMatCpy( SMAT /* worldToSky */, worldToLight ); } __host__ void stageDebugData( float4* p ) { stagePtrCpy( DBGDAT /* debugData */, p ); } __host__ void stageGeometryEpsilon( float e ) { stageF32Cpy( GEPS /* geometryEpsilon */, e ); } __host__ void stageClampValue( float c ) { stageF32Cpy( CLMPV /* clampValue */, c ); } __host__ void stageLightTree( LightCluster* t ) { stagePtrCpy( LTREE /* light tree */, t ); } __host__ void stageLightCounts( int tri, int point, int spot, int directional ) { const int4 counts = make_int4( tri, point, spot, directional ); stageInt4Cpy( LCNTS /* lightCounts */, counts ); } __host__ void pushStagedCopies() { for (auto c : stagedCpy) cudaMemcpy( c.d, c.s, c.n, cudaMemcpyHostToDevice ); stagedCpy.clear(); for (auto n : stagedPtr) pushPtrCpy( n.id, n.p ); stagedPtr.clear(); for (auto n : 
stagedInt) pushIntCpy( n.id, n.v ); stagedInt.clear(); for (auto n : stagedInt4) pushInt4Cpy( n.id, n.v ); stagedInt4.clear(); for (auto n : stagedF32) pushF32Cpy( n.id, n.v ); stagedF32.clear(); for (auto n : stagedMat) pushMatCpy( n.id, n.v ); stagedMat.clear(); } // counters for persistent threads static __device__ Counters* counters; __global__ void InitCountersForExtend_Kernel( int pathCount ) { if (threadIdx.x != 0) return; counters->activePaths = pathCount; // remaining active paths counters->extensionRays = 0; // compaction counter for extension rays counters->shadowRays = 0; // compaction counter for connections counters->totalExtensionRays = pathCount; counters->totalShadowRays = 0; } __host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); } __global__ void InitCountersSubsequent_Kernel() { if (threadIdx.x != 0) return; counters->totalExtensionRays += counters->extensionRays; counters->activePaths = counters->extensionRays; // remaining active paths counters->extensionRays = 0; // compaction counter for extension rays } __host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); } __host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); } // functional blocks #include "tools_shared.h" #include "sampling_shared.h" #include "material_shared.h" #include "lights_shared.h" #include "bsdf.h" #include "camera.h" #include "pathtracer.h" #include "finalize_shared.h" #include "connections.h" } // namespace lh2core // EOF
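A short sketch of how the staging API above is meant to be driven from the host. The light counts, the example values, and the surrounding function are assumptions; the point is the call order: stage while the GPU may still be rendering the previous frame, then push once that frame has finished.

// Inside the render core, after the scene has been updated on the CPU:
static void UpdateDeviceConstants( int triCount, int pointCount, int spotCount, int dirCount )
{
	stageGeometryEpsilon( 1.0e-4f );
	stageClampValue( 10.0f );
	stageLightCounts( triCount, pointCount, spotCount, dirCount );
	// ... stage any changed device pointers or sizes the same way ...
	// Once the GPU has finished the frame that used the old values:
	pushStagedCopies();   // performs the batched cudaMemcpy / cudaMemcpyToSymbol calls
}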
a1bce8e44a99b0e36abbd2ab73d4f6024ab001cd.hip
// !!! This is a file automatically generated by hipify!!!
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>

using namespace std;

__global__ void addKernel(int *c, int *a, int *b)
{
	int i = threadIdx.x;
	c[i] = a[i] + b[i]; // Add the two vectors element-wise on the GPU
}

int main()
{
	const int N = 100;
	int size = N * sizeof(int);

	int *a_dev;
	int *b_dev;
	int *c_dev; // Declare device (GPU) pointers

	int *c_host = (int*)malloc(size);
	int *a_host = (int*)malloc(size);
	int *b_host = (int*)malloc(size); // Allocate host (CPU) memory

	hipMalloc(&a_dev, size);
	hipMalloc(&b_dev, size);
	hipMalloc(&c_dev, size); // Allocate device (GPU) memory

	hipDeviceSynchronize();

	for (int i = 0; i < N; i++)
	{
		a_host[i] = 1;
		b_host[i] = 2;
	} // Initialize the vectors on the host

	hipMemcpy(a_dev, a_host, size, hipMemcpyHostToDevice); // Copy the input values to the GPU
	hipMemcpy(b_dev, b_host, size, hipMemcpyHostToDevice);

	addKernel << <1, N >> >(c_dev, a_dev, b_dev); // Launch the kernel on the GPU

	hipDeviceSynchronize();

	hipMemcpy(c_host, c_dev, size, hipMemcpyDeviceToHost); // Copy the result back to the host

	int res = 0;
	for (int i = 0; i < N; i++)
	{
		res += c_host[i];
	} // Sum all the values

	cout << "result: " << res << endl;

	return 0;
}
a1bce8e44a99b0e36abbd2ab73d4f6024ab001cd.cu
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>

using namespace std;

__global__ void addKernel(int *c, int *a, int *b)
{
	int i = threadIdx.x;
	c[i] = a[i] + b[i]; // Add the two vectors element-wise on the GPU
}

int main()
{
	const int N = 100;
	int size = N * sizeof(int);

	int *a_dev;
	int *b_dev;
	int *c_dev; // Declare device (GPU) pointers

	int *c_host = (int*)malloc(size);
	int *a_host = (int*)malloc(size);
	int *b_host = (int*)malloc(size); // Allocate host (CPU) memory

	cudaMalloc(&a_dev, size);
	cudaMalloc(&b_dev, size);
	cudaMalloc(&c_dev, size); // Allocate device (GPU) memory

	cudaDeviceSynchronize();

	for (int i = 0; i < N; i++)
	{
		a_host[i] = 1;
		b_host[i] = 2;
	} // Initialize the vectors on the host

	cudaMemcpy(a_dev, a_host, size, cudaMemcpyHostToDevice); // Copy the input values to the GPU
	cudaMemcpy(b_dev, b_host, size, cudaMemcpyHostToDevice);

	addKernel << <1, N >> >(c_dev, a_dev, b_dev); // Launch the kernel on the GPU

	cudaDeviceSynchronize();

	cudaMemcpy(c_host, c_dev, size, cudaMemcpyDeviceToHost); // Copy the result back to the host

	int res = 0;
	for (int i = 0; i < N; i++)
	{
		res += c_host[i];
	} // Sum all the values

	cout << "result: " << res << endl;

	return 0;
}
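One detail both versions of this example omit is releasing the buffers they allocate. A minimal cleanup that could be added just before the final return (illustrative only):

	cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev);   // release device memory
	free(a_host);    free(b_host);    free(c_host);      // release host memory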
53ca81da048dba1518a962a528c42a0c618689fe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date September 2014

       @precisions normal z -> c d s

*/
#include "common_magma.h"

#if (GPUSHMEM < 200)
    #define BLOCK_SIZE 128
#else
    #define BLOCK_SIZE 512
#endif

__global__ void
zmgecsrmv_kernel(
    int num_rows, int num_cols, int num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex *d_val,
    magma_index_t *d_rowptr,
    magma_index_t *d_colind,
    magmaDoubleComplex *d_x,
    magmaDoubleComplex beta,
    magmaDoubleComplex *d_y)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    extern __shared__ magmaDoubleComplex dot[];

    if( row<num_rows ){
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
        int start = d_rowptr[ row ] ;
        int end = d_rowptr[ row+1 ];
        for( j=start; j<end; j++ ){
            int col = d_colind [ j ];
            magmaDoubleComplex val = d_val[ j ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] += val * d_x[ col + i*num_cols ];
        }
        for( int i=0; i<num_vecs; i++ )
            d_y[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
                                     + beta * d_y[ row + i*num_cols ];
    }
}

/**
    Purpose
    -------

    This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
    num_vec vectors on the GPU. Input format is CSR.

    Arguments
    ---------

    @param transA    magma_trans_t        transposition parameter for A
    @param m         magma_int_t          number of rows in A
    @param n         magma_int_t          number of columns in A
    @param num_vecs  magma_int_t          number of vectors
    @param alpha     magmaDoubleComplex   scalar multiplier
    @param d_val     magmaDoubleComplex*  array containing values of A in CSR
    @param d_rowptr  magma_int_t*         rowpointer of A in CSR
    @param d_colind  magma_int_t*         columnindices of A in CSR
    @param d_x       magmaDoubleComplex*  input vector x
    @param beta      magmaDoubleComplex   scalar multiplier
    @param d_y       magmaDoubleComplex*  input/output vector y

    @ingroup magmasparse_zblas
    ********************************************************************/

extern "C" magma_int_t
magma_zmgecsrmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex *d_val,
    magma_index_t *d_rowptr,
    magma_index_t *d_colind,
    magmaDoubleComplex *d_x,
    magmaDoubleComplex beta,
    magmaDoubleComplex *d_y )
{
    dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
    unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors
    hipLaunchKernelGGL(( zmgecsrmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0, m, n, num_vecs, alpha, d_val, d_rowptr, d_colind, d_x, beta, d_y);

    return MAGMA_SUCCESS;
}
53ca81da048dba1518a962a528c42a0c618689fe.cu
/*
    -- MAGMA (version 1.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date September 2014

       @precisions normal z -> c d s

*/
#include "common_magma.h"

#if (GPUSHMEM < 200)
    #define BLOCK_SIZE 128
#else
    #define BLOCK_SIZE 512
#endif

__global__ void
zmgecsrmv_kernel(
    int num_rows, int num_cols, int num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex *d_val,
    magma_index_t *d_rowptr,
    magma_index_t *d_colind,
    magmaDoubleComplex *d_x,
    magmaDoubleComplex beta,
    magmaDoubleComplex *d_y)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    extern __shared__ magmaDoubleComplex dot[];

    if( row<num_rows ){
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
        int start = d_rowptr[ row ] ;
        int end = d_rowptr[ row+1 ];
        for( j=start; j<end; j++ ){
            int col = d_colind [ j ];
            magmaDoubleComplex val = d_val[ j ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] += val * d_x[ col + i*num_cols ];
        }
        for( int i=0; i<num_vecs; i++ )
            d_y[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
                                     + beta * d_y[ row + i*num_cols ];
    }
}

/**
    Purpose
    -------

    This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
    num_vec vectors on the GPU. Input format is CSR.

    Arguments
    ---------

    @param transA    magma_trans_t        transposition parameter for A
    @param m         magma_int_t          number of rows in A
    @param n         magma_int_t          number of columns in A
    @param num_vecs  magma_int_t          number of vectors
    @param alpha     magmaDoubleComplex   scalar multiplier
    @param d_val     magmaDoubleComplex*  array containing values of A in CSR
    @param d_rowptr  magma_int_t*         rowpointer of A in CSR
    @param d_colind  magma_int_t*         columnindices of A in CSR
    @param d_x       magmaDoubleComplex*  input vector x
    @param beta      magmaDoubleComplex   scalar multiplier
    @param d_y       magmaDoubleComplex*  input/output vector y

    @ingroup magmasparse_zblas
    ********************************************************************/

extern "C" magma_int_t
magma_zmgecsrmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex *d_val,
    magma_index_t *d_rowptr,
    magma_index_t *d_colind,
    magmaDoubleComplex *d_x,
    magmaDoubleComplex beta,
    magmaDoubleComplex *d_y )
{
    dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
    unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors
    zmgecsrmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>> (m, n, num_vecs, alpha, d_val, d_rowptr, d_colind, d_x, beta, d_y);

    return MAGMA_SUCCESS;
}
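A small CPU reference (the container types and function name are assumptions, no MAGMA dependencies) that spells out the data layout the kernel above indexes into: vector i of X starts at offset i*n and vector i of Y at offset i*n, i.e. the right-hand sides are stored back to back with stride num_cols.

#include <complex>
#include <vector>

using cplx = std::complex<double>;

// Y = alpha * A * X + beta * Y for num_vecs right-hand sides, A given in CSR form.
void mgecsrmv_reference(int m, int n, int num_vecs, cplx alpha,
                        const std::vector<cplx>& val, const std::vector<int>& rowptr,
                        const std::vector<int>& colind, const std::vector<cplx>& x,
                        cplx beta, std::vector<cplx>& y)
{
    for (int row = 0; row < m; ++row) {
        for (int i = 0; i < num_vecs; ++i) {
            cplx dot = 0.0;
            for (int j = rowptr[row]; j < rowptr[row + 1]; ++j)
                dot += val[j] * x[colind[j] + i * n];      // kernel: d_x[ col + i*num_cols ]
            y[row + i * n] = alpha * dot + beta * y[row + i * n];
        }
    }
}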
ee3f839d23437e0d3f927ef060fbc0f885a0cb0a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "kernel/gpu/cuda_impl/sigmoid_cross_entropy_with_logits_impl.cuh"

template <typename T, typename S>
__global__ void SigmoidCrossEntropyWithLogitsKernel(const size_t size, const T *logits, const S *labels,
                                                    T *outputs) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
    const T reverse_factor = static_cast<T>(logits[i] >= 0);
    outputs[i] = log1p(exp(logits[i] - 2 * reverse_factor * logits[i])) - logits[i] * (labels[i] - reverse_factor);
  }
}

template <typename T, typename S>
void SigmoidCrossEntropyWithLogits(const size_t size, const T *logits, const S *labels, T *outputs,
                                   hipStream_t cuda_stream) {
  hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, logits, labels, outputs);
}

template void SigmoidCrossEntropyWithLogits<float, float>(const size_t size, const float *logits, const float *labels,
                                                          float *outputs, hipStream_t cuda_stream);
ee3f839d23437e0d3f927ef060fbc0f885a0cb0a.cu
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "kernel/gpu/cuda_impl/sigmoid_cross_entropy_with_logits_impl.cuh"

template <typename T, typename S>
__global__ void SigmoidCrossEntropyWithLogitsKernel(const size_t size, const T *logits, const S *labels,
                                                    T *outputs) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) {
    const T reverse_factor = static_cast<T>(logits[i] >= 0);
    outputs[i] = log1p(exp(logits[i] - 2 * reverse_factor * logits[i])) - logits[i] * (labels[i] - reverse_factor);
  }
}

template <typename T, typename S>
void SigmoidCrossEntropyWithLogits(const size_t size, const T *logits, const S *labels, T *outputs,
                                   cudaStream_t cuda_stream) {
  SigmoidCrossEntropyWithLogitsKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, logits, labels, outputs);
}

template void SigmoidCrossEntropyWithLogits<float, float>(const size_t size, const float *logits, const float *labels,
                                                          float *outputs, cudaStream_t cuda_stream);
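The log1p/exp form in the kernel is the numerically stable version of binary cross-entropy with logits. A small double-precision CPU check (illustrative, outside any framework) compares it with the naive definition, which overflows to infinity for very negative logits.

#include <cmath>
#include <cstdio>

// Stable form used by the kernel above.
double stable_bce_with_logits(double x, double y) {
  double rf = (x >= 0.0) ? 1.0 : 0.0;                      // reverse_factor in the kernel
  return std::log1p(std::exp(x - 2.0 * rf * x)) - x * (y - rf);
}

// Naive definition: -[y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))].
double naive_bce_with_logits(double x, double y) {
  double p = 1.0 / (1.0 + std::exp(-x));
  return -(y * std::log(p) + (1.0 - y) * std::log(1.0 - p));
}

int main() {
  for (double x : {-800.0, -2.0, 0.0, 2.0, 800.0})         // naive form blows up at x = -800
    std::printf("x=%7.1f  stable=%.6f  naive=%.6f\n",
                x, stable_bce_with_logits(x, 1.0), naive_bce_with_logits(x, 1.0));
  return 0;
}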
c4e068d6e82ec7378ed7791afd6a0b8b091ebee5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define _USE_MATH_DEFINES #include <math.h> #include <ATen/ATen.h> #include <ATen/DeviceGuard.h> #include <ATen/Dispatch.h> #include <ATen/native/hip/ForeachFunctors.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/ForeachUtils.h> #include <ATen/native/TensorIterator.h> namespace { // Thin wrapper around https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE_1g57a3c8313f570282a1a7bcc78743b08e, // to ensure the Cuda math library's isfinite is actually what gets called in // _amp_non_finite_check_and_unscale_cuda_'s gpu_kernel lambda. // // isfinite_ensure_cuda_math is defined outside at::native because: // - A bare call to "isfinite(val)" inside at::native causes nvcc to prefer the unrelated // Tensor at::native::isfinite(const Tensor&), resulting in an error: // "no suitable constructor exists to convert from "float" to "at::Tensor"" // - Unfortunately, the Cuda math library documentation doesn't say how (or if) you can provide a full namespace path // to ensure that its version of a particular function is invoked. It only shows bare (not-namespaced) // calls to its routines inside kernel or device functions. // - "std::isfinite(val)" in the gpu_kernel lambda causes an "unspecified launch failure" at runtime with cuda 9 on Windows. // // isfinite_ensure_cuda_math, declared at file scope outside the at::native region, uses isfinite as math library docs // suggest and allows disambiguated usage in the lambda within the at::native region. // GPU_LAMBDA is defined as __host__ __device__ (see Loops.cuh), so I need the __host__ keyword or else nvcc complains that // "calling a __device__ function("isfinite_ensure_cuda_math") from a __host__ __device__ function("operator()") is not allowed." static __host__ __device__ __forceinline__ int isfinite_ensure_cuda_math(float val) { return isfinite(val); } } namespace at { namespace native { namespace { // Single-tensor fallback for _amp_foreach_non_finite_check_and_unscale_cuda_. // Handles individual tensors that are acceptable to unscale but not MTA-safe. void _amp_non_finite_check_and_unscale_cuda_(Tensor& scaled_grad, Tensor& found_inf, const Tensor& inv_scale) { // The only way we reach this function is through _amp_foreach_non_finite_check_and_unscale_cuda_, so no input checks. // It's not obvious gpu_kernel always guards onto its argument. Guarding here just in case. const OptionalDeviceGuard device_guard(device_of(scaled_grad)); // Acts on scaled_grad in place. auto iter = TensorIterator::unary_op(scaled_grad, scaled_grad); AT_DISPATCH_FLOATING_TYPES_AND_HALF( iter.dtype(), "_amp_non_finite_check_and_unscale_cuda", [&iter, &found_inf, &inv_scale] { auto* found_inf_ptr = found_inf.data_ptr<float>(); auto* inv_scale_ptr = inv_scale.data_ptr<float>(); using opmath_t = get_opmath_t<scalar_t>::opmath_t; gpu_kernel(iter, [found_inf_ptr, inv_scale_ptr] GPU_LAMBDA (scalar_t val_in) -> scalar_t { auto val = static_cast<opmath_t>(val_in); if (!isfinite_ensure_cuda_math(val)) { *found_inf_ptr = 1.f; } // Every thread accesses inv_scale, but it will hit in cache. const auto inv_scale_val = *inv_scale_ptr; return static_cast<scalar_t>(inv_scale_val == 1.f ? val : val * inv_scale_val); }); }); } } // anonymous namespace // Multiplies each tensor in scaled_grads by inv_scale in-place. // If any element of any tensor in scaled_grads is inf or NaN, sets found_inf to 1.0. 
// Uses multi tensor apply (MTA) to process all MTA-safe tensors. // // Args: // scaled_grads: A TensorList of scaled gradient tensors. May contain infs or NaNs. // found_inf: A single-element float tensor to which 1.0 will be written if any gradient contain infs/nans. // Pre-zeroing found_inf, if appropriate, is the responsibility of the caller. // inv_scale: The inverse of the scale factor by which scaled_grads are currently multiplied. void _amp_foreach_non_finite_check_and_unscale_cuda_(TensorList scaled_grads, Tensor& found_inf, const Tensor& inv_scale) { if (scaled_grads.size() == 0) { return; } TORCH_CHECK(inv_scale.is_cuda(), "inv_scale must be a CUDA tensor."); TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor."); TORCH_CHECK(inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."); TORCH_CHECK(found_inf.numel() == 1, "found_inf must be a 1-element tensor."); TORCH_CHECK(inv_scale.scalar_type() == at::ScalarType::Float, "inv_scale must be a float tensor."); TORCH_CHECK(found_inf.scalar_type() == at::ScalarType::Float, "found_inf must be a float tensor."); // Ensures client code (GradScaler) filtered scaled_grads by dtype. check_foreach_api_restrictions(scaled_grads); std::vector<std::vector<at::Tensor>> tensor_lists; // is_non_overlapping_and_dense() is not available in Python. // GradScaler can't filter for it. We need to filter here. if (can_use_fast_route(scaled_grads)) { // Hopefully common case. // can_use_fast_route is true, which confirms: // - all scaled_grads are strided // - all scaled_grads are non overlapping and dense // - all scaled_grads are on the same device TORCH_CHECK(scaled_grads[0].is_cuda(), "scaled_grads must be CUDA tensors."); // Sets up MTA launch to use scaled_grads as-is. tensor_lists.emplace_back(scaled_grads.vec()); } else { // Hopefully uncommon case. // can_use_fast_route is an all-or-nothing check. In this path it was false, // so any of the above confirmations could have gone wrong. // We filter MTA-safe tensors into an MTA-able list. // If a tensor is acceptable but not MTA-safe, we fall back to the TensorIterator kernel. // If a tensor is unacceptable, we throw an error to blame GradScaler. tensor_lists.resize(1); tensor_lists[0].reserve(scaled_grads.size()); auto expected_device = scaled_grads[0].device(); for (const Tensor& t : scaled_grads) { // Ensures GradScaler filtered scaled_grads by device. TORCH_CHECK(t.is_cuda(), "one of scaled_grads was not a CUDA tensor."); TORCH_CHECK(t.device() == expected_device, "scaled_grads must be on the same device."); TORCH_CHECK(t.layout() == at::kStrided, "one of scaled_grads was not a strided tensor."); if (!t.is_non_overlapping_and_dense()) { // t is acceptable but not MTA-safe. Falls back to single-tensor TensorIterator kernel. _amp_non_finite_check_and_unscale_cuda_(const_cast<Tensor&>(t), found_inf, inv_scale); } else { tensor_lists[0].push_back(t); } } if (tensor_lists[0].size() == 0) { return; } } AT_DISPATCH_FLOATING_TYPES_AND_HALF( tensor_lists[0][0].scalar_type(), "_amp_foreach_non_finite_check_and_unscale_cuda", [&tensor_lists, &found_inf, &inv_scale] { auto* found_inf_ptr = found_inf.data_ptr<float>(); auto* inv_scale_ptr = inv_scale.data_ptr<float>(); using opmath_t = get_opmath_t<scalar_t>::opmath_t; // multi_tensor_apply guards onto tensor_lists[0][0], no need to guard explicitly. 
multi_tensor_apply<1>(tensor_lists, UnaryOpFunctor<scalar_t, /* depth */ 1, /* r_args_depth */ 1, /* res_arg_index */ 0>(), [found_inf_ptr, inv_scale_ptr] GPU_LAMBDA (opmath_t val) -> opmath_t { // There is a slight asymmetry here with the TensorIterator kernel above. // MTA Functors ensure val comes in as opmath_t rather than scalar_t. if (!isfinite_ensure_cuda_math(val)) { *found_inf_ptr = 1.f; } // Every thread accesses inv_scale, but it will hit in cache. const auto inv_scale_val = *inv_scale_ptr; return static_cast<opmath_t>(inv_scale_val == 1.f ? val : val * inv_scale_val); }); }); } // amp_update_scale_cuda_kernel is launched with a single thread to compute the new scale. // The scale factor is maintained and updated on the GPU to avoid synchronization. __global__ void amp_update_scale_cuda_kernel(int* growth_tracker, float* current_scale, float* found_inf, float* new_scale, double growth_factor, double backoff_factor, int growth_interval) { if (*found_inf) { *new_scale = (*current_scale)*backoff_factor; *growth_tracker = 0; } else { // Entering this branch means we just carried out a successful step, // so growth_tracker is incremented before comparing to growth_interval. auto successful = (*growth_tracker) + 1; if (successful == growth_interval) { *new_scale = (*current_scale)*growth_factor; *growth_tracker = 0; } else { *new_scale = *current_scale; *growth_tracker = successful; } } } // _amp_update_scale_cuda asynchronously updates the scale factor. // // Args: // growth_tracker: A one-element torch.cuda.IntTensor containing the number of recent consecutive unskipped steps. // current_scale: A one-element torch.cuda.FloatTensor containing the current scale value. // found_inf: A one-element torch.cuda.FloatTensor. If > 0, indicates that infs/nans were found by the relevant // prior _amp_non_finite_check_and_unscale_cuda call, and 0 if no infs/nans were found. // growth_factor: Multiplier if no infs/NaNs were found (typically slightly > 1). // backoff_factor: Multiplier if infs/NaNs were found (typically 0.5). // growth_interval: Number of consecutive unskipped steps that must occur for current_scale to be multiplied by // growth_factor. // // Returns: // new_scale: A new one-element torch.cuda.FloatTensor containing the new recommended scale value. 
Tensor _amp_update_scale_cuda(Tensor& growth_tracker,
                              const Tensor& current_scale,
                              const Tensor& found_inf,
                              double growth_factor,
                              double backoff_factor,
                              int64_t growth_interval)
{
  TORCH_CHECK(growth_tracker.is_cuda(), "growth_tracker must be a CUDA tensor.");
  TORCH_CHECK(current_scale.is_cuda(), "current_scale must be a CUDA tensor.");
  TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor.");
  TORCH_CHECK(growth_tracker.numel() == 1, "growth_tracker must be a 1-element tensor.");
  TORCH_CHECK(current_scale.numel() == 1, "current_scale must be a 1-element tensor.");
  TORCH_CHECK(found_inf.numel() == 1, "found_inf must be a 1-element tensor.");
  TORCH_CHECK(growth_tracker.scalar_type() == at::ScalarType::Int, "growth_tracker must be an int tensor.");
  TORCH_CHECK(current_scale.scalar_type() == at::ScalarType::Float, "current_scale must be a float tensor.");
  TORCH_CHECK(found_inf.scalar_type() == at::ScalarType::Float, "found_inf must be a float tensor.");

  auto new_scale = at::empty_like(current_scale);

  hipLaunchKernelGGL(( amp_update_scale_cuda_kernel), dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
    growth_tracker.data_ptr<int>(),
    current_scale.data_ptr<float>(),
    found_inf.data_ptr<float>(),
    new_scale.data_ptr<float>(),
    growth_factor,
    backoff_factor,
    growth_interval);
  TORCH_CUDA_KERNEL_LAUNCH_CHECK();

  return new_scale;
}

}} // namespace at::native
c4e068d6e82ec7378ed7791afd6a0b8b091ebee5.cu
#define _USE_MATH_DEFINES #include <math.h> #include <ATen/ATen.h> #include <ATen/DeviceGuard.h> #include <ATen/Dispatch.h> #include <ATen/native/cuda/ForeachFunctors.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/ForeachUtils.h> #include <ATen/native/TensorIterator.h> namespace { // Thin wrapper around https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE_1g57a3c8313f570282a1a7bcc78743b08e, // to ensure the Cuda math library's isfinite is actually what gets called in // _amp_non_finite_check_and_unscale_cuda_'s gpu_kernel lambda. // // isfinite_ensure_cuda_math is defined outside at::native because: // - A bare call to "isfinite(val)" inside at::native causes nvcc to prefer the unrelated // Tensor at::native::isfinite(const Tensor&), resulting in an error: // "no suitable constructor exists to convert from "float" to "at::Tensor"" // - Unfortunately, the Cuda math library documentation doesn't say how (or if) you can provide a full namespace path // to ensure that its version of a particular function is invoked. It only shows bare (not-namespaced) // calls to its routines inside kernel or device functions. // - "std::isfinite(val)" in the gpu_kernel lambda causes an "unspecified launch failure" at runtime with cuda 9 on Windows. // // isfinite_ensure_cuda_math, declared at file scope outside the at::native region, uses isfinite as math library docs // suggest and allows disambiguated usage in the lambda within the at::native region. // GPU_LAMBDA is defined as __host__ __device__ (see Loops.cuh), so I need the __host__ keyword or else nvcc complains that // "calling a __device__ function("isfinite_ensure_cuda_math") from a __host__ __device__ function("operator()") is not allowed." static __host__ __device__ __forceinline__ int isfinite_ensure_cuda_math(float val) { return isfinite(val); } } namespace at { namespace native { namespace { // Single-tensor fallback for _amp_foreach_non_finite_check_and_unscale_cuda_. // Handles individual tensors that are acceptable to unscale but not MTA-safe. void _amp_non_finite_check_and_unscale_cuda_(Tensor& scaled_grad, Tensor& found_inf, const Tensor& inv_scale) { // The only way we reach this function is through _amp_foreach_non_finite_check_and_unscale_cuda_, so no input checks. // It's not obvious gpu_kernel always guards onto its argument. Guarding here just in case. const OptionalDeviceGuard device_guard(device_of(scaled_grad)); // Acts on scaled_grad in place. auto iter = TensorIterator::unary_op(scaled_grad, scaled_grad); AT_DISPATCH_FLOATING_TYPES_AND_HALF( iter.dtype(), "_amp_non_finite_check_and_unscale_cuda", [&iter, &found_inf, &inv_scale] { auto* found_inf_ptr = found_inf.data_ptr<float>(); auto* inv_scale_ptr = inv_scale.data_ptr<float>(); using opmath_t = get_opmath_t<scalar_t>::opmath_t; gpu_kernel(iter, [found_inf_ptr, inv_scale_ptr] GPU_LAMBDA (scalar_t val_in) -> scalar_t { auto val = static_cast<opmath_t>(val_in); if (!isfinite_ensure_cuda_math(val)) { *found_inf_ptr = 1.f; } // Every thread accesses inv_scale, but it will hit in cache. const auto inv_scale_val = *inv_scale_ptr; return static_cast<scalar_t>(inv_scale_val == 1.f ? val : val * inv_scale_val); }); }); } } // anonymous namespace // Multiplies each tensor in scaled_grads by inv_scale in-place. // If any element of any tensor in scaled_grads is inf or NaN, sets found_inf to 1.0. // Uses multi tensor apply (MTA) to process all MTA-safe tensors. 
// // Args: // scaled_grads: A TensorList of scaled gradient tensors. May contain infs or NaNs. // found_inf: A single-element float tensor to which 1.0 will be written if any gradient contain infs/nans. // Pre-zeroing found_inf, if appropriate, is the responsibility of the caller. // inv_scale: The inverse of the scale factor by which scaled_grads are currently multiplied. void _amp_foreach_non_finite_check_and_unscale_cuda_(TensorList scaled_grads, Tensor& found_inf, const Tensor& inv_scale) { if (scaled_grads.size() == 0) { return; } TORCH_CHECK(inv_scale.is_cuda(), "inv_scale must be a CUDA tensor."); TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor."); TORCH_CHECK(inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."); TORCH_CHECK(found_inf.numel() == 1, "found_inf must be a 1-element tensor."); TORCH_CHECK(inv_scale.scalar_type() == at::ScalarType::Float, "inv_scale must be a float tensor."); TORCH_CHECK(found_inf.scalar_type() == at::ScalarType::Float, "found_inf must be a float tensor."); // Ensures client code (GradScaler) filtered scaled_grads by dtype. check_foreach_api_restrictions(scaled_grads); std::vector<std::vector<at::Tensor>> tensor_lists; // is_non_overlapping_and_dense() is not available in Python. // GradScaler can't filter for it. We need to filter here. if (can_use_fast_route(scaled_grads)) { // Hopefully common case. // can_use_fast_route is true, which confirms: // - all scaled_grads are strided // - all scaled_grads are non overlapping and dense // - all scaled_grads are on the same device TORCH_CHECK(scaled_grads[0].is_cuda(), "scaled_grads must be CUDA tensors."); // Sets up MTA launch to use scaled_grads as-is. tensor_lists.emplace_back(scaled_grads.vec()); } else { // Hopefully uncommon case. // can_use_fast_route is an all-or-nothing check. In this path it was false, // so any of the above confirmations could have gone wrong. // We filter MTA-safe tensors into an MTA-able list. // If a tensor is acceptable but not MTA-safe, we fall back to the TensorIterator kernel. // If a tensor is unacceptable, we throw an error to blame GradScaler. tensor_lists.resize(1); tensor_lists[0].reserve(scaled_grads.size()); auto expected_device = scaled_grads[0].device(); for (const Tensor& t : scaled_grads) { // Ensures GradScaler filtered scaled_grads by device. TORCH_CHECK(t.is_cuda(), "one of scaled_grads was not a CUDA tensor."); TORCH_CHECK(t.device() == expected_device, "scaled_grads must be on the same device."); TORCH_CHECK(t.layout() == at::kStrided, "one of scaled_grads was not a strided tensor."); if (!t.is_non_overlapping_and_dense()) { // t is acceptable but not MTA-safe. Falls back to single-tensor TensorIterator kernel. _amp_non_finite_check_and_unscale_cuda_(const_cast<Tensor&>(t), found_inf, inv_scale); } else { tensor_lists[0].push_back(t); } } if (tensor_lists[0].size() == 0) { return; } } AT_DISPATCH_FLOATING_TYPES_AND_HALF( tensor_lists[0][0].scalar_type(), "_amp_foreach_non_finite_check_and_unscale_cuda", [&tensor_lists, &found_inf, &inv_scale] { auto* found_inf_ptr = found_inf.data_ptr<float>(); auto* inv_scale_ptr = inv_scale.data_ptr<float>(); using opmath_t = get_opmath_t<scalar_t>::opmath_t; // multi_tensor_apply guards onto tensor_lists[0][0], no need to guard explicitly. 
multi_tensor_apply<1>(tensor_lists, UnaryOpFunctor<scalar_t, /* depth */ 1, /* r_args_depth */ 1, /* res_arg_index */ 0>(), [found_inf_ptr, inv_scale_ptr] GPU_LAMBDA (opmath_t val) -> opmath_t { // There is a slight asymmetry here with the TensorIterator kernel above. // MTA Functors ensure val comes in as opmath_t rather than scalar_t. if (!isfinite_ensure_cuda_math(val)) { *found_inf_ptr = 1.f; } // Every thread accesses inv_scale, but it will hit in cache. const auto inv_scale_val = *inv_scale_ptr; return static_cast<opmath_t>(inv_scale_val == 1.f ? val : val * inv_scale_val); }); }); } // amp_update_scale_cuda_kernel is launched with a single thread to compute the new scale. // The scale factor is maintained and updated on the GPU to avoid synchronization. __global__ void amp_update_scale_cuda_kernel(int* growth_tracker, float* current_scale, float* found_inf, float* new_scale, double growth_factor, double backoff_factor, int growth_interval) { if (*found_inf) { *new_scale = (*current_scale)*backoff_factor; *growth_tracker = 0; } else { // Entering this branch means we just carried out a successful step, // so growth_tracker is incremented before comparing to growth_interval. auto successful = (*growth_tracker) + 1; if (successful == growth_interval) { *new_scale = (*current_scale)*growth_factor; *growth_tracker = 0; } else { *new_scale = *current_scale; *growth_tracker = successful; } } } // _amp_update_scale_cuda asynchronously updates the scale factor. // // Args: // growth_tracker: A one-element torch.cuda.IntTensor containing the number of recent consecutive unskipped steps. // current_scale: A one-element torch.cuda.FloatTensor containing the current scale value. // found_inf: A one-element torch.cuda.FloatTensor. If > 0, indicates that infs/nans were found by the relevant // prior _amp_non_finite_check_and_unscale_cuda call, and 0 if no infs/nans were found. // growth_factor: Multiplier if no infs/NaNs were found (typically slightly > 1). // backoff_factor: Multiplier if infs/NaNs were found (typically 0.5). // growth_interval: Number of consecutive unskipped steps that must occur for current_scale to be multiplied by // growth_factor. // // Returns: // new_scale: A new one-element torch.cuda.FloatTensor containing the new recommended scale value. 
Tensor _amp_update_scale_cuda(Tensor& growth_tracker,
                              const Tensor& current_scale,
                              const Tensor& found_inf,
                              double growth_factor,
                              double backoff_factor,
                              int64_t growth_interval)
{
  TORCH_CHECK(growth_tracker.is_cuda(), "growth_tracker must be a CUDA tensor.");
  TORCH_CHECK(current_scale.is_cuda(), "current_scale must be a CUDA tensor.");
  TORCH_CHECK(found_inf.is_cuda(), "found_inf must be a CUDA tensor.");
  TORCH_CHECK(growth_tracker.numel() == 1, "growth_tracker must be a 1-element tensor.");
  TORCH_CHECK(current_scale.numel() == 1, "current_scale must be a 1-element tensor.");
  TORCH_CHECK(found_inf.numel() == 1, "found_inf must be a 1-element tensor.");
  TORCH_CHECK(growth_tracker.scalar_type() == at::ScalarType::Int, "growth_tracker must be an int tensor.");
  TORCH_CHECK(current_scale.scalar_type() == at::ScalarType::Float, "current_scale must be a float tensor.");
  TORCH_CHECK(found_inf.scalar_type() == at::ScalarType::Float, "found_inf must be a float tensor.");

  auto new_scale = at::empty_like(current_scale);

  amp_update_scale_cuda_kernel<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
    growth_tracker.data_ptr<int>(),
    current_scale.data_ptr<float>(),
    found_inf.data_ptr<float>(),
    new_scale.data_ptr<float>(),
    growth_factor,
    backoff_factor,
    growth_interval);
  TORCH_CUDA_KERNEL_LAUNCH_CHECK();

  return new_scale;
}

}} // namespace at::native
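The growth/backoff rule applied by amp_update_scale_cuda_kernel in both versions of this file is easier to follow outside kernel syntax. Below is a minimal host-side C++ sketch of the same update logic, for illustration only; ScaleState and update_scale are hypothetical names and not part of the ATen API.

// Hypothetical host-side sketch of the growth/backoff rule implemented by
// amp_update_scale_cuda_kernel above. Not part of ATen; illustration only.
#include <cassert>

struct ScaleState {
  float scale;         // current loss scale
  int growth_tracker;  // consecutive successful (all-finite) steps
};

// Mirrors the kernel: on overflow the scale is multiplied by backoff_factor
// and the tracker resets; after growth_interval clean steps the scale is
// multiplied by growth_factor and the tracker resets.
inline float update_scale(ScaleState& s, bool found_inf,
                          float growth_factor, float backoff_factor,
                          int growth_interval) {
  if (found_inf) {
    s.scale *= backoff_factor;
    s.growth_tracker = 0;
  } else {
    int successful = s.growth_tracker + 1;
    if (successful == growth_interval) {
      s.scale *= growth_factor;
      s.growth_tracker = 0;
    } else {
      s.growth_tracker = successful;
    }
  }
  return s.scale;
}

int main() {
  ScaleState s{65536.f, 0};
  // One overflow halves the scale (example values: growth 2, backoff 0.5) ...
  assert(update_scale(s, /*found_inf=*/true, 2.f, 0.5f, /*growth_interval=*/2000) == 32768.f);
  // ... and 2000 clean steps double it back.
  for (int i = 0; i < 2000; ++i) update_scale(s, false, 2.f, 0.5f, 2000);
  assert(s.scale == 65536.f);
  return 0;
}

Keeping the tracker and scale on the GPU, as the real kernel does, avoids a host-device synchronization on every optimizer step.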
4db7c292821512eb53239746be156c579eb002ed.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> /* * send matrix and vector to gpu * multiply each column of matrix with the vector(each matrix element with corresponding vector element) * store the result in the same matrix(same position) * get the matrix from gpu * sum each column of the matrix to a new vector * print the result * * use PrintMatrixElement to print individual elements */ //used for error checking void ErrorCheck(char *c) { if (hipGetLastError() != 0) { printf("error @ %s -> %s \n", c, hipGetErrorString(hipGetLastError())); //exit(-1); } } //kernel that executes on CUDA device __global__ void MatrixVectorMultiplication(float *mtrx, float *vect, int rows, int columns) { //vect's length = rows int idx = blockIdx.x*blockDim.x + threadIdx.x; int maxIndex = rows * columns; if (idx < maxIndex) { mtrx[idx] = mtrx[idx] * vect[idx/columns];//save the result to the mtrx for saving space } } //used for printing vectors void PrintVector(float *vec,int length) { printf("\n ######### \n printing vector of size %d \n \n", length); for (int i = 0; i < length; i++) { printf("%f ", vec[i]); } printf("\n \n printed vector of size %d \n ######### \n", length); } //prints the element in the (x,y) position of the matrix (does not do error checks) // !! MY MATRIX STARTS AT (0,0) !! // columnCount is the number of columns in matrix void PrintMatrixElement(float *mtrx,int columnCount, int x, int y) { printf("\n ######### \n printing matrix element at %d , %d \n \n", x, y); printf("%d",mtrx[columnCount*x+y]); printf("\n \n printed matrix element at %d , %d \n ######### \n", x, y); } //used for printing matrices void PrintMatrix(float *mtrx, int row, int column) { printf("\n ######### \n printing matrix of size %d by %d \n \n", row,column); for (int i = 0; i < row; i++) { printf("| "); for (int j = 0; j < column; j++) { printf("%f ", mtrx[i*column + j]); } printf("|\n\n"); } printf("\n \n printed matrix of size %d by %d \n ######### \n", row,column); } int main(void) { float *matr_d, *matr_h;//input matrix float *vec_d, *vec_h;//input vector // I am assuming the size of the vector does not change in the code float *result_vector_h;//resulted vector const int m = 50000;//m rows && this is also the number of elements in the multiplying vector const int n = 700;//n columns && this is also the number of elements in the resulting vector //allocate memory & set values for matrix & vector on CPU matr_h = (float*)malloc(sizeof(float)*m*n); for (int i = 0; i < m; i++)//rows { for (int j = 0; j < n; j++)//columns { matr_h[i*n + j] = i * n + j;//set matrix values } } vec_h = (float*)malloc(sizeof(float)*m); for (int i = 0; i < m; i++) { vec_h[i] = i;//set vector values } //use these to print the input matrix and vector //PrintMatrix(matr_h, m, n); //PrintVector(vec_h, m); //allocate memory for matrix and vector on GPU hipMalloc((void**)&matr_d, sizeof(float)*m*n); ErrorCheck("hipMalloc (matr_d)"); hipMalloc((void**)&vec_d, sizeof(float)*m); ErrorCheck("hipMalloc (vec_d)"); //copy matr_h & vec_h to matr_d & vec_d hipMemcpy(matr_d, matr_h, sizeof(float)*m*n, hipMemcpyHostToDevice); ErrorCheck("cudaMemCpy (matr_d)"); hipMemcpy(vec_d, vec_h, sizeof(float)*m, hipMemcpyHostToDevice); ErrorCheck("hipMemcpy (vec_d)"); int blockSize = 8; int numberOfBlocks = (m * n) / blockSize + ((m*n) % blockSize == 0 ? 
0 : 1); // benchmarking hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); MatrixVectorMultiplication << <numberOfBlocks, blockSize >> > (matr_d, vec_d, m, n); ErrorCheck("kernel call (matrixVectorMultiplication)"); //allocate memory for result_h and copy the result from result_d //result_h = (float*)malloc(sizeof(float)*m*n); hipMemcpy(matr_h, matr_d, sizeof(float)*m*n, hipMemcpyDeviceToHost); ErrorCheck("hipMemcpy (result_h)"); // benchmarking hipEventRecord(stop, 0); hipEventSynchronize(stop); float et; hipEventElapsedTime(&et, start, stop); hipEventDestroy(start); hipEventDestroy(stop); //printf("\n @@@@@@@@@ benchmark result -> %f @@@@@@ \n", et); // use this to print the resulting matrix //PrintMatrix(matr_h, m, n); //set space for result_vector_h result_vector_h = (float*)malloc(sizeof(float)*n); //sum each column and set it to result_vector_h[columnIndex] for (int i = 0; i < n; i++)//columns { result_vector_h[i] = 0;//reset the value for (int j = 0; j < m; j++)//rows { result_vector_h[i] += matr_h[j*n + i]; } } //print the resulting vector printf("resulting vector is -> \n | "); for (int i = 0; i < n; i++) { printf("%f ", result_vector_h[i]); } printf("\n\n ############### \n\n"); //cleanup free(matr_h); free(vec_h); free(result_vector_h); hipFree(matr_d); hipFree(vec_d); exit(0); }
4db7c292821512eb53239746be156c579eb002ed.cu
#include <stdio.h> #include <cuda.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> /* * send matrix and vector to gpu * multiply each column of matrix with the vector(each matrix element with corresponding vector element) * store the result in the same matrix(same position) * get the matrix from gpu * sum each column of the matrix to a new vector * print the result * * use PrintMatrixElement to print individual elements */ //used for error checking void ErrorCheck(char *c) { if (cudaGetLastError() != 0) { printf("error @ %s -> %s \n", c, cudaGetErrorString(cudaGetLastError())); //exit(-1); } } //kernel that executes on CUDA device __global__ void MatrixVectorMultiplication(float *mtrx, float *vect, int rows, int columns) { //vect's length = rows int idx = blockIdx.x*blockDim.x + threadIdx.x; int maxIndex = rows * columns; if (idx < maxIndex) { mtrx[idx] = mtrx[idx] * vect[idx/columns];//save the result to the mtrx for saving space } } //used for printing vectors void PrintVector(float *vec,int length) { printf("\n ######### \n printing vector of size %d \n \n", length); for (int i = 0; i < length; i++) { printf("%f ", vec[i]); } printf("\n \n printed vector of size %d \n ######### \n", length); } //prints the element in the (x,y) position of the matrix (does not do error checks) // !! MY MATRIX STARTS AT (0,0) !! // columnCount is the number of columns in matrix void PrintMatrixElement(float *mtrx,int columnCount, int x, int y) { printf("\n ######### \n printing matrix element at %d , %d \n \n", x, y); printf("%d",mtrx[columnCount*x+y]); printf("\n \n printed matrix element at %d , %d \n ######### \n", x, y); } //used for printing matrices void PrintMatrix(float *mtrx, int row, int column) { printf("\n ######### \n printing matrix of size %d by %d \n \n", row,column); for (int i = 0; i < row; i++) { printf("| "); for (int j = 0; j < column; j++) { printf("%f ", mtrx[i*column + j]); } printf("|\n\n"); } printf("\n \n printed matrix of size %d by %d \n ######### \n", row,column); } int main(void) { float *matr_d, *matr_h;//input matrix float *vec_d, *vec_h;//input vector // I am assuming the size of the vector does not change in the code float *result_vector_h;//resulted vector const int m = 50000;//m rows && this is also the number of elements in the multiplying vector const int n = 700;//n columns && this is also the number of elements in the resulting vector //allocate memory & set values for matrix & vector on CPU matr_h = (float*)malloc(sizeof(float)*m*n); for (int i = 0; i < m; i++)//rows { for (int j = 0; j < n; j++)//columns { matr_h[i*n + j] = i * n + j;//set matrix values } } vec_h = (float*)malloc(sizeof(float)*m); for (int i = 0; i < m; i++) { vec_h[i] = i;//set vector values } //use these to print the input matrix and vector //PrintMatrix(matr_h, m, n); //PrintVector(vec_h, m); //allocate memory for matrix and vector on GPU cudaMalloc((void**)&matr_d, sizeof(float)*m*n); ErrorCheck("cudaMalloc (matr_d)"); cudaMalloc((void**)&vec_d, sizeof(float)*m); ErrorCheck("cudaMalloc (vec_d)"); //copy matr_h & vec_h to matr_d & vec_d cudaMemcpy(matr_d, matr_h, sizeof(float)*m*n, cudaMemcpyHostToDevice); ErrorCheck("cudaMemCpy (matr_d)"); cudaMemcpy(vec_d, vec_h, sizeof(float)*m, cudaMemcpyHostToDevice); ErrorCheck("cudaMemcpy (vec_d)"); int blockSize = 8; int numberOfBlocks = (m * n) / blockSize + ((m*n) % blockSize == 0 ? 
0 : 1); // benchmarking cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); MatrixVectorMultiplication << <numberOfBlocks, blockSize >> > (matr_d, vec_d, m, n); ErrorCheck("kernel call (matrixVectorMultiplication)"); //allocate memory for result_h and copy the result from result_d //result_h = (float*)malloc(sizeof(float)*m*n); cudaMemcpy(matr_h, matr_d, sizeof(float)*m*n, cudaMemcpyDeviceToHost); ErrorCheck("cudaMemcpy (result_h)"); // benchmarking cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float et; cudaEventElapsedTime(&et, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); //printf("\n @@@@@@@@@ benchmark result -> %f @@@@@@ \n", et); // use this to print the resulting matrix //PrintMatrix(matr_h, m, n); //set space for result_vector_h result_vector_h = (float*)malloc(sizeof(float)*n); //sum each column and set it to result_vector_h[columnIndex] for (int i = 0; i < n; i++)//columns { result_vector_h[i] = 0;//reset the value for (int j = 0; j < m; j++)//rows { result_vector_h[i] += matr_h[j*n + i]; } } //print the resulting vector printf("resulting vector is -> \n | "); for (int i = 0; i < n; i++) { printf("%f ", result_vector_h[i]); } printf("\n\n ############### \n\n"); //cleanup free(matr_h); free(vec_h); free(result_vector_h); cudaFree(matr_d); cudaFree(vec_d); exit(0); }
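One detail worth noting in both versions above: the block count is computed as the quotient plus one whenever there is a remainder. A common equivalent is the round-up (ceiling) division idiom, sketched below as a small stand-alone check; blocks_needed is a hypothetical helper, not part of the benchmark.

// Hypothetical sketch: the ceiling-division idiom is equivalent to the
// "quotient plus one if remainder" expression used in the file above.
#include <cassert>

inline long blocks_needed(long elements, long block_size) {
  return (elements + block_size - 1) / block_size;  // round-up division
}

int main() {
  const long m = 50000, n = 700, blockSize = 8;
  long a = (m * n) / blockSize + ((m * n) % blockSize == 0 ? 0 : 1);  // as in the file
  long b = blocks_needed(m * n, blockSize);
  assert(a == b);  // both give 4,375,000 blocks for these sizes
  return 0;
}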
58ee16879471c2a923f00e9d79b7c9ff59ec4171.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This program takes n and trial count as parameters and nothing more. // It is assumed that n is a power of 2. // Compiles with nvcc -std=c++11 -rdc=true -arch=compute_50 -code=sm_50 #include <chrono> #include <cstdlib> #include <ctime> #include <iostream> #include <utility> #include <math_functions.h> #include "include/fast-fourier.h" using namespace std; using namespace chrono; using namespace fast_fourier; void gen_array(cfloat* output, int n); long double average(long double* in, int n); long double std_dev(long double* in, int n, long double average); __global__ void run_test(cfloat* input, cfloat* output, int n, bool* binary_stor) { fast_fourier_transform(input, output, n, binary_stor); } int main(int argc, char** argv) { if (argc < 3) { cerr << "Usage is " << argv[0] << " n num_trials" << endl; return 1; } int n(atoi(argv[1])); int trial_count(atoi(argv[2])); cfloat* input(new cfloat[n]); cfloat* output(new cfloat[n]); cfloat* d_input(nullptr); cfloat* d_output(nullptr); bool* binary_stor(nullptr); long double times[trial_count]; high_resolution_clock::time_point tp2, tp1; duration<long double, ratio<1,1000> > time_span; // Allocate device arrays if (hipMalloc( &d_input, sizeof(cfloat) * n ) != hipSuccess) { auto t = hipGetLastError(); cout << "Failed to allocate input: " << hipGetErrorName(t) << ", " << hipGetErrorString(t) << endl; return 1; } if (hipMalloc( &d_output, sizeof(cfloat) * n ) != hipSuccess) { auto t = hipGetLastError(); cout << "Failed to allocate output: " << hipGetErrorName(t) << ", " << hipGetErrorString(t) << endl; return 1; } if (hipMalloc( &binary_stor, sizeof(bool) * ilogbf(n) ) != hipSuccess) { auto t = hipGetLastError(); cout << "Failed to allocate boolean storage: " << hipGetErrorName(t) << ", " << hipGetErrorString(t) << endl; return 1; } // Run experiment for (int j(0) ; j < trial_count ; j++) { // Generate random input gen_array(input, n); // Run the test tp1 = system_clock::now(); // Copy the input array to the GPU if (hipMemcpy( d_input, input, (long) n * sizeof(cfloat), hipMemcpyHostToDevice ) != hipSuccess) { auto t = hipGetLastError(); cout << "Iteration: " << j << " Input failed to copy: " << hipGetErrorName(t) << ", " << hipGetErrorString(t) << endl; return 1; } hipLaunchKernelGGL(( run_test), dim3(1),dim3(1), 0, 0, d_input, d_output, n, binary_stor); if (hipMemcpy( output, d_output, (long) n * sizeof(cfloat), hipMemcpyDeviceToHost ) != hipSuccess) { auto t = hipGetLastError(); cout << "Iteration: " << j << " Output failed to copy: " << hipGetErrorName(t) << ", " << hipGetErrorString(t) << endl; return 1; } tp2 = system_clock::now(); time_span = duration_cast< duration<long double, ratio<1,1000> > >(tp2 - tp1); times[j] = time_span.count(); } // Calculate statistics long double av(average(times, trial_count)); long double sd(std_dev(times, trial_count, av)); cout << av << "\t" << sd << endl; hipFree( binary_stor ); hipFree( d_input ); hipFree( d_output ); return 0; } void gen_array(cfloat* output, int n) { srand(time(nullptr)); for (int j = 0; j < n; j++) output[j] = cfloat(rand(), rand()); } long double average(long double* in, int n) { long double s(0.0); for (int j(0) ; j < n ; j++) s += in[j]; return s/n; } long double std_dev(long double* in, int n, long double average) { long double var = 0; long double tmp = 0; for (int i = 0 ; i < n ; i++) { tmp = (in[i] - average); var += tmp * tmp; } long double stdDev = sqrt(var/n); return stdDev; }
58ee16879471c2a923f00e9d79b7c9ff59ec4171.cu
// This program takes n and trial count as parameters and nothing more. // It is assumed that n is a power of 2. // Compiles with nvcc -std=c++11 -rdc=true -arch=compute_50 -code=sm_50 #include <chrono> #include <cstdlib> #include <ctime> #include <iostream> #include <utility> #include <math_functions.h> #include "include/fast-fourier.h" using namespace std; using namespace chrono; using namespace fast_fourier; void gen_array(cfloat* output, int n); long double average(long double* in, int n); long double std_dev(long double* in, int n, long double average); __global__ void run_test(cfloat* input, cfloat* output, int n, bool* binary_stor) { fast_fourier_transform(input, output, n, binary_stor); } int main(int argc, char** argv) { if (argc < 3) { cerr << "Usage is " << argv[0] << " n num_trials" << endl; return 1; } int n(atoi(argv[1])); int trial_count(atoi(argv[2])); cfloat* input(new cfloat[n]); cfloat* output(new cfloat[n]); cfloat* d_input(nullptr); cfloat* d_output(nullptr); bool* binary_stor(nullptr); long double times[trial_count]; high_resolution_clock::time_point tp2, tp1; duration<long double, ratio<1,1000> > time_span; // Allocate device arrays if (cudaMalloc( &d_input, sizeof(cfloat) * n ) != cudaSuccess) { auto t = cudaGetLastError(); cout << "Failed to allocate input: " << cudaGetErrorName(t) << ", " << cudaGetErrorString(t) << endl; return 1; } if (cudaMalloc( &d_output, sizeof(cfloat) * n ) != cudaSuccess) { auto t = cudaGetLastError(); cout << "Failed to allocate output: " << cudaGetErrorName(t) << ", " << cudaGetErrorString(t) << endl; return 1; } if (cudaMalloc( &binary_stor, sizeof(bool) * ilogbf(n) ) != cudaSuccess) { auto t = cudaGetLastError(); cout << "Failed to allocate boolean storage: " << cudaGetErrorName(t) << ", " << cudaGetErrorString(t) << endl; return 1; } // Run experiment for (int j(0) ; j < trial_count ; j++) { // Generate random input gen_array(input, n); // Run the test tp1 = system_clock::now(); // Copy the input array to the GPU if (cudaMemcpy( d_input, input, (long) n * sizeof(cfloat), cudaMemcpyHostToDevice ) != cudaSuccess) { auto t = cudaGetLastError(); cout << "Iteration: " << j << " Input failed to copy: " << cudaGetErrorName(t) << ", " << cudaGetErrorString(t) << endl; return 1; } run_test<<<1,1>>>(d_input, d_output, n, binary_stor); if (cudaMemcpy( output, d_output, (long) n * sizeof(cfloat), cudaMemcpyDeviceToHost ) != cudaSuccess) { auto t = cudaGetLastError(); cout << "Iteration: " << j << " Output failed to copy: " << cudaGetErrorName(t) << ", " << cudaGetErrorString(t) << endl; return 1; } tp2 = system_clock::now(); time_span = duration_cast< duration<long double, ratio<1,1000> > >(tp2 - tp1); times[j] = time_span.count(); } // Calculate statistics long double av(average(times, trial_count)); long double sd(std_dev(times, trial_count, av)); cout << av << "\t" << sd << endl; cudaFree( binary_stor ); cudaFree( d_input ); cudaFree( d_output ); return 0; } void gen_array(cfloat* output, int n) { srand(time(nullptr)); for (int j = 0; j < n; j++) output[j] = cfloat(rand(), rand()); } long double average(long double* in, int n) { long double s(0.0); for (int j(0) ; j < n ; j++) s += in[j]; return s/n; } long double std_dev(long double* in, int n, long double average) { long double var = 0; long double tmp = 0; for (int i = 0 ; i < n ; i++) { tmp = (in[i] - average); var += tmp * tmp; } long double stdDev = sqrt(var/n); return stdDev; }
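Both versions of this benchmark repeat the same allocate-then-check pattern for every runtime call. Below is a hedged sketch of how that pattern is often factored into a single checking macro; CUDA_CHECK is a hypothetical helper written for illustration, not something the program above defines.

// Hypothetical error-checking helper that factors out the repeated
// "call, compare to cudaSuccess, print name and string" pattern used above.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      std::fprintf(stderr, "%s:%d: %s, %s\n", __FILE__, __LINE__,     \
                   cudaGetErrorName(err_), cudaGetErrorString(err_)); \
      std::exit(1);                                                   \
    }                                                                 \
  } while (0)

int main() {
  float* d_buf = nullptr;
  CUDA_CHECK(cudaMalloc(&d_buf, 1024 * sizeof(float)));  // aborts with a message on failure
  CUDA_CHECK(cudaFree(d_buf));
  return 0;
}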
9d03e2e791c01e4937bc819fed19b27ecfe5962b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2000-2020, Heiko Bauke // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. #include <cstdlib> #include <iostream> #include <vector> #include <trng/yarn5s.hpp> #include <trng/uniform01_dist.hpp> __global__ void parallel_pi(long samples, long *in, trng::yarn5s r) { long rank = threadIdx.x; long size = blockDim.x; r.jump(2 * (rank * samples / size)); // jump ahead trng::uniform01_dist<float> u; // random number distribution in[rank] = 0; // local number of points in circle for (long i = rank * samples / size; i < (rank + 1) * samples / size; ++i) { const float x = u(r), y = u(r); // choose random x- and y-coordinates if (x * x + y * y <= 1) // is point in circle? ++in[rank]; // increase thread-local counter } } int main(int argc, char *argv[]) { const long samples{1000000l}; // total number of points in square const int size{128}; // number of threads long *in_device; hipMalloc(&in_device, size * sizeof(*in_device)); trng::yarn5s r; // start parallel Monte Carlo hipLaunchKernelGGL(( parallel_pi), dim3(1), dim3(size), 0, 0, samples, in_device, r); // gather results std::vector<long> in(size); hipMemcpy(in.data(), in_device, size * sizeof(*in), hipMemcpyDeviceToHost); hipFree(in_device); long sum{0}; for (int rank{0}; rank < size; ++rank) sum += in[rank]; // print result std::cout << "pi = " << 4.0 * sum / samples << std::endl; return EXIT_SUCCESS; }
9d03e2e791c01e4937bc819fed19b27ecfe5962b.cu
// Copyright (c) 2000-2020, Heiko Bauke // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. #include <cstdlib> #include <iostream> #include <vector> #include <trng/yarn5s.hpp> #include <trng/uniform01_dist.hpp> __global__ void parallel_pi(long samples, long *in, trng::yarn5s r) { long rank = threadIdx.x; long size = blockDim.x; r.jump(2 * (rank * samples / size)); // jump ahead trng::uniform01_dist<float> u; // random number distribution in[rank] = 0; // local number of points in circle for (long i = rank * samples / size; i < (rank + 1) * samples / size; ++i) { const float x = u(r), y = u(r); // choose random x- and y-coordinates if (x * x + y * y <= 1) // is point in circle? ++in[rank]; // increase thread-local counter } } int main(int argc, char *argv[]) { const long samples{1000000l}; // total number of points in square const int size{128}; // number of threads long *in_device; cudaMalloc(&in_device, size * sizeof(*in_device)); trng::yarn5s r; // start parallel Monte Carlo parallel_pi<<<1, size>>>(samples, in_device, r); // gather results std::vector<long> in(size); cudaMemcpy(in.data(), in_device, size * sizeof(*in), cudaMemcpyDeviceToHost); cudaFree(in_device); long sum{0}; for (int rank{0}; rank < size; ++rank) sum += in[rank]; // print result std::cout << "pi = " << 4.0 * sum / samples << std::endl; return EXIT_SUCCESS; }
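For reference, the estimator used above is plain hit-counting Monte Carlo: draw points uniformly in the unit square and approximate pi as 4 times the fraction that lands inside the quarter circle. A minimal single-threaded sketch follows, using std::mt19937 in place of TRNG's yarn5s purely for illustration.

// Hypothetical sequential reference for the GPU estimator above.
#include <iostream>
#include <random>

int main() {
  const long samples = 1000000l;           // same total as the kernel
  std::mt19937 gen(42);
  std::uniform_real_distribution<float> u(0.f, 1.f);
  long in = 0;
  for (long i = 0; i < samples; ++i) {
    const float x = u(gen), y = u(gen);    // random point in the unit square
    if (x * x + y * y <= 1.f)              // inside the quarter circle?
      ++in;
  }
  std::cout << "pi = " << 4.0 * in / samples << std::endl;
  return 0;
}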
1adc1df554e80f0d186663babe60e423bd92b688.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THArgCheck(tensor->sizes().equals(index->sizes()), 4, "Index tensor must have the same size as output tensor."); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCTensor_(sizeLegacyNoScalars)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(hipGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(hipGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(hipGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(hipGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(hipGetLastError()); } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, 
THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small 
number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCudaLongTensor_sizeLegacyNoScalars(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #endif
1adc1df554e80f0d186663babe60e423bd92b688.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THArgCheck(tensor->sizes().equals(index->sizes()), 4, "Index tensor must have the same size as output tensor."); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCTensor_(sizeLegacyNoScalars)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(cudaGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(cudaGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(cudaGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(cudaGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(cudaGetLastError()); } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, 
index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCudaLongTensor_sizeLegacyNoScalars(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #endif
f0697200ceb79cfe1f0467bb9a1b5dc36314ca31.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <utilities/high_res_clock.h> #include <utilities/test_utilities.hpp> #include <converters/COOtoCSR.cuh> #include <rmm/device_vector.hpp> #include <cugraph/algorithms.hpp> #include <cugraph/legacy/graph.hpp> #include <rmm/device_vector.hpp> #include <thrust/device_ptr.h> #include <fstream> std::vector<int> getGoldenTopKIds(std::ifstream& fs_result, int k = 10) { std::vector<int> vec; int val; int count = 0; while (fs_result >> val && ((count++) < k)) { vec.push_back(val); } vec.resize(k); return vec; } std::vector<int> getTopKIds(double* p_katz, int count, int k = 10) { rmm::device_vector<int> id(count); thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default), id.begin(), id.end()); thrust::sort_by_key(rmm::exec_policy(rmm::cuda_stream_default), p_katz, p_katz + count, id.begin(), thrust::greater<double>()); std::vector<int> topK(k); thrust::copy(id.begin(), id.begin() + k, topK.begin()); return topK; } template <typename VT, typename ET, typename WT> int getMaxDegree(cugraph::legacy::GraphCSRView<VT, ET, WT> const& g) { rmm::device_vector<ET> degree_vector(g.number_of_vertices); ET* p_degree = degree_vector.data().get(); g.degree(p_degree, cugraph::legacy::DegreeDirection::OUT); ET max_out_degree = thrust::reduce(rmm::exec_policy(rmm::cuda_stream_default), p_degree, p_degree + g.number_of_vertices, static_cast<ET>(-1), thrust::maximum<ET>()); return max_out_degree; } typedef struct Katz_Usecase_t { std::string matrix_file; std::string result_file; Katz_Usecase_t(const std::string& a, const std::string& b) { // assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir(); if ((a != "") && (a[0] != '/')) { matrix_file = rapidsDatasetRootDir + "/" + a; } else { matrix_file = a; } if ((b != "") && (b[0] != '/')) { result_file = rapidsDatasetRootDir + "/" + b; } else { result_file = b; } } Katz_Usecase_t& operator=(const Katz_Usecase_t& rhs) { matrix_file = rhs.matrix_file; result_file = rhs.result_file; return *this; } } Katz_Usecase; class Tests_Katz : public ::testing::TestWithParam<Katz_Usecase> { public: Tests_Katz() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} void run_current_test(const Katz_Usecase& param) { FILE* fpin = fopen(param.matrix_file.c_str(), "r"); ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure."; std::ifstream fs_result(param.result_file); ASSERT_EQ(fs_result.is_open(), true) << "file open (" << param.result_file << ") failure."; int m, k; int nnz; MM_typecode mc; ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0) << "could not read Matrix Market file properties" << "\n"; ASSERT_TRUE(mm_is_matrix(mc)); ASSERT_TRUE(mm_is_coordinate(mc)); ASSERT_FALSE(mm_is_complex(mc)); 
ASSERT_FALSE(mm_is_skew(mc)); // Allocate memory on host std::vector<int> cooRowInd(nnz), cooColInd(nnz); std::vector<int> cooVal(nnz); std::vector<double> katz_centrality(m); // Read ASSERT_EQ((cugraph::test::mm_to_coo<int, int>( fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)), 0) << "could not read matrix data" << "\n"; ASSERT_EQ(fclose(fpin), 0); cugraph::legacy::GraphCOOView<int, int, float> cooview( &cooColInd[0], &cooRowInd[0], nullptr, m, nnz); auto csr = cugraph::coo_to_csr(cooview); cugraph::legacy::GraphCSRView<int, int, float> G = csr->view(); rmm::device_vector<double> katz_vector(m); double* d_katz = thrust::raw_pointer_cast(katz_vector.data()); int max_out_degree = getMaxDegree(G); double alpha = 1 / (static_cast<double>(max_out_degree) + 1); cugraph::katz_centrality(G, d_katz, alpha, 100, 1e-6, false, true); auto threshold_ratio = 1e-3; auto threshold_magnitude = (1.0 / static_cast<double>(m)) * threshold_ratio; std::vector<int> top10CUGraph = getTopKIds(d_katz, m); std::vector<int> top10Golden = getGoldenTopKIds(fs_result); auto nearly_equal = [threshold_ratio, threshold_magnitude](auto lhs, auto rhs) { return std::abs(lhs - rhs) < ::max(::max(lhs, rhs) * threshold_ratio, threshold_magnitude); }; ASSERT_TRUE( std::equal(top10CUGraph.begin(), top10CUGraph.end(), top10Golden.begin(), nearly_equal)) << "Katz centrality values do not match with the reference values."; } }; INSTANTIATE_TEST_SUITE_P( simple_test, Tests_Katz, ::testing::Values(Katz_Usecase("test/datasets/karate.mtx", "ref/katz/karate.csv"), // Katz_Usecase("test/datasets/netscience.mtx", "ref/katz/netscience.csv"), Katz_Usecase("test/datasets/polbooks.mtx", "ref/katz/polbooks.csv"), Katz_Usecase("test/datasets/dolphins.mtx", "ref/katz/dolphins.csv"))); TEST_P(Tests_Katz, Check) { run_current_test(GetParam()); } CUGRAPH_TEST_PROGRAM_MAIN()
f0697200ceb79cfe1f0467bb9a1b5dc36314ca31.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <utilities/high_res_clock.h> #include <utilities/test_utilities.hpp> #include <converters/COOtoCSR.cuh> #include <rmm/device_vector.hpp> #include <cugraph/algorithms.hpp> #include <cugraph/legacy/graph.hpp> #include <rmm/device_vector.hpp> #include <thrust/device_ptr.h> #include <fstream> std::vector<int> getGoldenTopKIds(std::ifstream& fs_result, int k = 10) { std::vector<int> vec; int val; int count = 0; while (fs_result >> val && ((count++) < k)) { vec.push_back(val); } vec.resize(k); return vec; } std::vector<int> getTopKIds(double* p_katz, int count, int k = 10) { rmm::device_vector<int> id(count); thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default), id.begin(), id.end()); thrust::sort_by_key(rmm::exec_policy(rmm::cuda_stream_default), p_katz, p_katz + count, id.begin(), thrust::greater<double>()); std::vector<int> topK(k); thrust::copy(id.begin(), id.begin() + k, topK.begin()); return topK; } template <typename VT, typename ET, typename WT> int getMaxDegree(cugraph::legacy::GraphCSRView<VT, ET, WT> const& g) { rmm::device_vector<ET> degree_vector(g.number_of_vertices); ET* p_degree = degree_vector.data().get(); g.degree(p_degree, cugraph::legacy::DegreeDirection::OUT); ET max_out_degree = thrust::reduce(rmm::exec_policy(rmm::cuda_stream_default), p_degree, p_degree + g.number_of_vertices, static_cast<ET>(-1), thrust::maximum<ET>()); return max_out_degree; } typedef struct Katz_Usecase_t { std::string matrix_file; std::string result_file; Katz_Usecase_t(const std::string& a, const std::string& b) { // assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir(); if ((a != "") && (a[0] != '/')) { matrix_file = rapidsDatasetRootDir + "/" + a; } else { matrix_file = a; } if ((b != "") && (b[0] != '/')) { result_file = rapidsDatasetRootDir + "/" + b; } else { result_file = b; } } Katz_Usecase_t& operator=(const Katz_Usecase_t& rhs) { matrix_file = rhs.matrix_file; result_file = rhs.result_file; return *this; } } Katz_Usecase; class Tests_Katz : public ::testing::TestWithParam<Katz_Usecase> { public: Tests_Katz() {} static void SetupTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} void run_current_test(const Katz_Usecase& param) { FILE* fpin = fopen(param.matrix_file.c_str(), "r"); ASSERT_NE(fpin, nullptr) << "fopen (" << param.matrix_file << ") failure."; std::ifstream fs_result(param.result_file); ASSERT_EQ(fs_result.is_open(), true) << "file open (" << param.result_file << ") failure."; int m, k; int nnz; MM_typecode mc; ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0) << "could not read Matrix Market file properties" << "\n"; ASSERT_TRUE(mm_is_matrix(mc)); ASSERT_TRUE(mm_is_coordinate(mc)); ASSERT_FALSE(mm_is_complex(mc)); ASSERT_FALSE(mm_is_skew(mc)); // Allocate memory on host 
std::vector<int> cooRowInd(nnz), cooColInd(nnz); std::vector<int> cooVal(nnz); std::vector<double> katz_centrality(m); // Read ASSERT_EQ((cugraph::test::mm_to_coo<int, int>( fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], &cooVal[0], NULL)), 0) << "could not read matrix data" << "\n"; ASSERT_EQ(fclose(fpin), 0); cugraph::legacy::GraphCOOView<int, int, float> cooview( &cooColInd[0], &cooRowInd[0], nullptr, m, nnz); auto csr = cugraph::coo_to_csr(cooview); cugraph::legacy::GraphCSRView<int, int, float> G = csr->view(); rmm::device_vector<double> katz_vector(m); double* d_katz = thrust::raw_pointer_cast(katz_vector.data()); int max_out_degree = getMaxDegree(G); double alpha = 1 / (static_cast<double>(max_out_degree) + 1); cugraph::katz_centrality(G, d_katz, alpha, 100, 1e-6, false, true); auto threshold_ratio = 1e-3; auto threshold_magnitude = (1.0 / static_cast<double>(m)) * threshold_ratio; std::vector<int> top10CUGraph = getTopKIds(d_katz, m); std::vector<int> top10Golden = getGoldenTopKIds(fs_result); auto nearly_equal = [threshold_ratio, threshold_magnitude](auto lhs, auto rhs) { return std::abs(lhs - rhs) < std::max(std::max(lhs, rhs) * threshold_ratio, threshold_magnitude); }; ASSERT_TRUE( std::equal(top10CUGraph.begin(), top10CUGraph.end(), top10Golden.begin(), nearly_equal)) << "Katz centrality values do not match with the reference values."; } }; INSTANTIATE_TEST_SUITE_P( simple_test, Tests_Katz, ::testing::Values(Katz_Usecase("test/datasets/karate.mtx", "ref/katz/karate.csv"), // Katz_Usecase("test/datasets/netscience.mtx", "ref/katz/netscience.csv"), Katz_Usecase("test/datasets/polbooks.mtx", "ref/katz/polbooks.csv"), Katz_Usecase("test/datasets/dolphins.mtx", "ref/katz/dolphins.csv"))); TEST_P(Tests_Katz, Check) { run_current_test(GetParam()); } CUGRAPH_TEST_PROGRAM_MAIN()
2a4224c7640a5d7795dc1064fdb7ab3417544d10.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>

#define N 8000
#define numThreads 512

__device__ float multiplyValues( float x, float y)
{
    return x * y;
}

__global__ void elementwiseMultiply( int size, float *d_a, float *d_b, float *d_c)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < size)
    {
        d_c[tid] = multiplyValues(d_a[tid], d_b[tid]);
    }
}

int main()
{
    float *h_a,*h_b,*h_c;
    float *d_a, *d_b, *d_c;

    h_a = new float[N];
    h_b = new float[N];
    h_c = new float[N];

    hipMalloc((void**)&d_a, N * sizeof(float));
    hipMalloc((void**)&d_b, N * sizeof(float));
    hipMalloc((void**)&d_c, N * sizeof(float));

    for (int i = 0; i < N; i++)
    {
        h_a[i] = i+1;
        h_b[i] = i+2;
    }

    hipMemcpy(d_a, h_a, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, N*sizeof(float), hipMemcpyHostToDevice);

    elementwiseMultiply << <(N + numThreads - 1) / numThreads, numThreads >> >( N, d_a, d_b, d_c);

    hipMemcpy(h_c, d_c, N*sizeof(float), hipMemcpyDeviceToHost);

    std::cout << h_c[0] << std::endl;

    delete[] h_a;
    delete[] h_b;
    delete[] h_c;

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return 0;
}
2a4224c7640a5d7795dc1064fdb7ab3417544d10.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>

#define N 8000
#define numThreads 512

__device__ float multiplyValues( float x, float y)
{
    return x * y;
}

__global__ void elementwiseMultiply( int size, float *d_a, float *d_b, float *d_c)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < size)
    {
        d_c[tid] = multiplyValues(d_a[tid], d_b[tid]);
    }
}

int main()
{
    float *h_a,*h_b,*h_c;
    float *d_a, *d_b, *d_c;

    h_a = new float[N];
    h_b = new float[N];
    h_c = new float[N];

    cudaMalloc((void**)&d_a, N * sizeof(float));
    cudaMalloc((void**)&d_b, N * sizeof(float));
    cudaMalloc((void**)&d_c, N * sizeof(float));

    for (int i = 0; i < N; i++)
    {
        h_a[i] = i+1;
        h_b[i] = i+2;
    }

    cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice);

    elementwiseMultiply << <(N + numThreads - 1) / numThreads, numThreads >> >( N, d_a, d_b, d_c);

    cudaMemcpy(h_c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost);

    std::cout << h_c[0] << std::endl;

    delete[] h_a;
    delete[] h_b;
    delete[] h_c;

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
77ef229d1e31689c39313f80600da6c7e2b1af6e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "printVal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
    iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
    iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL(( printVal), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( printVal), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( printVal), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
77ef229d1e31689c39313f80600da6c7e2b1af6e.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "printVal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
    iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
    iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
printVal<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
printVal<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
printVal<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
0b1ee720a6b8baf4575cecb7606ee4e309db9bab.hip
// !!! This is a file automatically generated by hipify!!! // ****************************************** // implicit time stepping implementation of 2D diffusion problem // Ben Cumming, CSCS // ***************************************** // A small benchmark app that solves the 2D fisher equation using second-order // finite differences. // Syntax: ./main nx ny nt t #include <algorithm> #include <iostream> #include <sstream> #include <fstream> #include <cstdio> #include <cmath> #include <cstdlib> #include <cstring> #include <thrust/fill.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <omp.h> #include "linalg.h" #include "operators.h" #include "data.h" #include "stats.h" using namespace linalg; using namespace operators; using namespace data; using namespace stats; // read command line arguments static void readcmdline(Discretization& options, int argc, char* argv[]) { if (argc<5 || argc>6 ) { std::cerr << "Usage: main nx ny nt t\n"; std::cerr << " nx number of gridpoints in x-direction\n"; std::cerr << " ny number of gridpoints in y-direction\n"; std::cerr << " nt number of timesteps\n"; std::cerr << " t total time\n"; std::cerr << " v [optional] turn on verbose output\n"; exit(1); } // read nx options.nx = atoi(argv[1]); if (options.nx < 1) { std::cerr << "nx must be positive integer\n"; exit(-1); } // read ny options.ny = atoi(argv[2]); if (options.ny < 1) { std::cerr << "ny must be positive integer\n"; exit(-1); } options.N = options.nx*options.ny; // read nt options.nt = atoi(argv[3]); if (options.nt < 1) { std::cerr << "nt must be positive integer\n"; exit(-1); } // read total time double t = atof(argv[4]); if (t < 0) { std::cerr << "t must be positive real value\n"; exit(-1); } verbose_output = false; if( argc==6 ) { verbose_output = true; } // compute timestep size options.dt = t / options.nt; // compute the distance between grid points // assume that x dimension has length 1.0 options.dx = 1. / (options.nx - 1); // set alpha, assume diffusion coefficient D is 1 options.alpha = (options.dx * options.dx) / (1. * options.dt); } // ============================================================================== int main(int argc, char* argv[]) { // read command line arguments readcmdline(options, argc, argv); int nx = options.nx; int ny = options.ny; int nt = options.nt; // initialize cuda int device_count; cuda_check_status( hipGetDeviceCount(&device_count) ); if(device_count < 1) { std::cerr << "error: there should be at least one device per node" << std::endl; exit(-1); } cuda_check_status( hipSetDevice(0) ); // get the cublas handle to force cublas initialization outside the main time // stepping loop, to ensure that the timing doesn't count initialization costs auto handle = cublas_handle(); // set iteration parameters int max_cg_iters = 200; int max_newton_iters = 50; double tolerance = 1.e-6; int length = nx*ny; std::cout << "========================================================================" << std::endl; std::cout << " Welcome to mini-stencil!" << std::endl; std::cout << "version :: C++ with CUDA" << std::endl; std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl; std::cout << "time :: " << nt << " time steps from 0 .. 
" << options.nt*options.dt << std::endl;; std::cout << "iteration :: " << "CG " << max_cg_iters << ", Newton " << max_newton_iters << ", tolerance " << tolerance << std::endl;; std::cout << "========================================================================" << std::endl; thrust::device_vector<double> X_OLD(length); thrust::device_vector<double> B(length); thrust::device_vector<double> DELTAX(length); // set dirichlet boundary conditions to 0 all around thrust::device_vector<double> BND_E(ny,0.0); thrust::device_vector<double> BND_W(ny,0.0); thrust::device_vector<double> BND_S(nx,0.0); thrust::device_vector<double> BND_N(nx,0.0); // set the initial condition // a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius // no larger than 1/8 of both xdim and ydim // double *x_new = (double*) malloc(length*sizeof(*x_new)); thrust::host_vector<double> x_new(length,0.0); double xc = 1.0 / 4.0; double yc = (ny - 1) * options.dx / 4; double radius = fmin(xc, yc) / 2.0; for (int j = 0; j < ny; j++) { double y = (j - 1) * options.dx; for (int i = 0; i < nx; i++) { double x = (i - 1) * options.dx; if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius) x_new[i+nx*j] = 0.1; } } thrust::device_vector<double> X_NEW(x_new); flops_bc = 0; flops_diff = 0; flops_blas1 = 0; iters_cg = 0; iters_newton = 0; // start timer double timespent = -omp_get_wtime(); double dxs = 1000. * (options.dx * options.dx); // main timeloop for (int timestep = 1; timestep <= nt; timestep++) { // set x_new and x_old to be the solution X_OLD = X_NEW; double residual_thrust; bool converged = false; int it; for (it=0; it<max_newton_iters; it++) { // compute residual : requires both x_new and x_old diffusion_thrust(nx,ny,options.alpha,dxs, BND_W, BND_E, BND_S, BND_N, X_OLD, X_NEW, B); residual_thrust = norm2_thrust(B); // check for convergence if (residual_thrust < tolerance) { converged = true; break; } // solve linear system to get -deltax bool cg_converged_thrust = false; cg_thrust( BND_W, BND_E, BND_S, BND_N, X_OLD, DELTAX, B, max_cg_iters, tolerance, cg_converged_thrust); // check that the CG solver converged if (!cg_converged_thrust) break; // update solution axpy_thrust( -1.0, DELTAX, X_NEW ); // Thrust } iters_newton += it+1; // output some statistics if (converged && verbose_output) { std::cout << "step " << timestep << " required " << it << " iterations for residual " << residual_thrust << std::endl; } if (!converged) { std::cerr << "step " << timestep << " ERROR : nonlinear iterations failed to converge" << std::endl; break; } } // get times timespent += omp_get_wtime(); //////////////////////////////////////////////////////////////////// // write final solution to BOV file for visualization //////////////////////////////////////////////////////////////////// x_new = X_NEW; // binary data FILE* output = fopen("output.bin", "w"); fwrite(x_new.data(), sizeof(double), nx * ny, output); fclose(output); // meta data std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl; // print table sumarizing results std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << 
"simulation took " << timespent << " seconds" << std::endl; std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of " << float(iters_cg)/timespent << " iters/second" << std::endl; std::cout << iters_newton << " newton iterations" << std::endl; std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "Goodbye!" << std::endl; return 0; }
0b1ee720a6b8baf4575cecb7606ee4e309db9bab.cu
// ****************************************** // implicit time stepping implementation of 2D diffusion problem // Ben Cumming, CSCS // ***************************************** // A small benchmark app that solves the 2D fisher equation using second-order // finite differences. // Syntax: ./main nx ny nt t #include <algorithm> #include <iostream> #include <sstream> #include <fstream> #include <cstdio> #include <cmath> #include <cstdlib> #include <cstring> #include <thrust/fill.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <omp.h> #include "linalg.h" #include "operators.h" #include "data.h" #include "stats.h" using namespace linalg; using namespace operators; using namespace data; using namespace stats; // read command line arguments static void readcmdline(Discretization& options, int argc, char* argv[]) { if (argc<5 || argc>6 ) { std::cerr << "Usage: main nx ny nt t\n"; std::cerr << " nx number of gridpoints in x-direction\n"; std::cerr << " ny number of gridpoints in y-direction\n"; std::cerr << " nt number of timesteps\n"; std::cerr << " t total time\n"; std::cerr << " v [optional] turn on verbose output\n"; exit(1); } // read nx options.nx = atoi(argv[1]); if (options.nx < 1) { std::cerr << "nx must be positive integer\n"; exit(-1); } // read ny options.ny = atoi(argv[2]); if (options.ny < 1) { std::cerr << "ny must be positive integer\n"; exit(-1); } options.N = options.nx*options.ny; // read nt options.nt = atoi(argv[3]); if (options.nt < 1) { std::cerr << "nt must be positive integer\n"; exit(-1); } // read total time double t = atof(argv[4]); if (t < 0) { std::cerr << "t must be positive real value\n"; exit(-1); } verbose_output = false; if( argc==6 ) { verbose_output = true; } // compute timestep size options.dt = t / options.nt; // compute the distance between grid points // assume that x dimension has length 1.0 options.dx = 1. / (options.nx - 1); // set alpha, assume diffusion coefficient D is 1 options.alpha = (options.dx * options.dx) / (1. * options.dt); } // ============================================================================== int main(int argc, char* argv[]) { // read command line arguments readcmdline(options, argc, argv); int nx = options.nx; int ny = options.ny; int nt = options.nt; // initialize cuda int device_count; cuda_check_status( cudaGetDeviceCount(&device_count) ); if(device_count < 1) { std::cerr << "error: there should be at least one device per node" << std::endl; exit(-1); } cuda_check_status( cudaSetDevice(0) ); // get the cublas handle to force cublas initialization outside the main time // stepping loop, to ensure that the timing doesn't count initialization costs auto handle = cublas_handle(); // set iteration parameters int max_cg_iters = 200; int max_newton_iters = 50; double tolerance = 1.e-6; int length = nx*ny; std::cout << "========================================================================" << std::endl; std::cout << " Welcome to mini-stencil!" << std::endl; std::cout << "version :: C++ with CUDA" << std::endl; std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl; std::cout << "time :: " << nt << " time steps from 0 .. 
" << options.nt*options.dt << std::endl;; std::cout << "iteration :: " << "CG " << max_cg_iters << ", Newton " << max_newton_iters << ", tolerance " << tolerance << std::endl;; std::cout << "========================================================================" << std::endl; thrust::device_vector<double> X_OLD(length); thrust::device_vector<double> B(length); thrust::device_vector<double> DELTAX(length); // set dirichlet boundary conditions to 0 all around thrust::device_vector<double> BND_E(ny,0.0); thrust::device_vector<double> BND_W(ny,0.0); thrust::device_vector<double> BND_S(nx,0.0); thrust::device_vector<double> BND_N(nx,0.0); // set the initial condition // a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius // no larger than 1/8 of both xdim and ydim // double *x_new = (double*) malloc(length*sizeof(*x_new)); thrust::host_vector<double> x_new(length,0.0); double xc = 1.0 / 4.0; double yc = (ny - 1) * options.dx / 4; double radius = fmin(xc, yc) / 2.0; for (int j = 0; j < ny; j++) { double y = (j - 1) * options.dx; for (int i = 0; i < nx; i++) { double x = (i - 1) * options.dx; if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius) x_new[i+nx*j] = 0.1; } } thrust::device_vector<double> X_NEW(x_new); flops_bc = 0; flops_diff = 0; flops_blas1 = 0; iters_cg = 0; iters_newton = 0; // start timer double timespent = -omp_get_wtime(); double dxs = 1000. * (options.dx * options.dx); // main timeloop for (int timestep = 1; timestep <= nt; timestep++) { // set x_new and x_old to be the solution X_OLD = X_NEW; double residual_thrust; bool converged = false; int it; for (it=0; it<max_newton_iters; it++) { // compute residual : requires both x_new and x_old diffusion_thrust(nx,ny,options.alpha,dxs, BND_W, BND_E, BND_S, BND_N, X_OLD, X_NEW, B); residual_thrust = norm2_thrust(B); // check for convergence if (residual_thrust < tolerance) { converged = true; break; } // solve linear system to get -deltax bool cg_converged_thrust = false; cg_thrust( BND_W, BND_E, BND_S, BND_N, X_OLD, DELTAX, B, max_cg_iters, tolerance, cg_converged_thrust); // check that the CG solver converged if (!cg_converged_thrust) break; // update solution axpy_thrust( -1.0, DELTAX, X_NEW ); // Thrust } iters_newton += it+1; // output some statistics if (converged && verbose_output) { std::cout << "step " << timestep << " required " << it << " iterations for residual " << residual_thrust << std::endl; } if (!converged) { std::cerr << "step " << timestep << " ERROR : nonlinear iterations failed to converge" << std::endl; break; } } // get times timespent += omp_get_wtime(); //////////////////////////////////////////////////////////////////// // write final solution to BOV file for visualization //////////////////////////////////////////////////////////////////// x_new = X_NEW; // binary data FILE* output = fopen("output.bin", "w"); fwrite(x_new.data(), sizeof(double), nx * ny, output); fclose(output); // meta data std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << options.nx << " " << options.ny << " 1" << std::endl;; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl; // print table sumarizing results std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << 
"simulation took " << timespent << " seconds" << std::endl; std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of " << float(iters_cg)/timespent << " iters/second" << std::endl; std::cout << iters_newton << " newton iterations" << std::endl; std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "Goodbye!" << std::endl; return 0; }
bb3f755114a390671c2e2d06796e0e0254270c1d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<time.h>
#include<stdlib.h>

__global__ void func1(int *c,int *a,int *b,int n,int startvalue)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( (i < n && i >= startvalue ) || i == startvalue)
    {
        a[i] = i * 2;
        b[i] = i * 3;
        i++;
    }
}

__global__ void func2(int *c,int *a,int *b,int n,int startvalue)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( (i < n && i >= startvalue ) || i == startvalue)
    {
        c[i] = a[i] + b[i];
        i++;
    }
}

int main()
{
    int *d_c;
    int *d_a;
    int *d_b;
    int n=5,x;
    int a[n],b[n],c[n];
    int i ;
    i=0;
    int startvalue;
    int blocks = 1024;
    int threads= 1024;

    hipMalloc((void **)&d_c, n*sizeof(int));
    hipMemcpy(d_c, &c, n*sizeof(int), hipMemcpyHostToDevice);
    hipMalloc((void **)&d_a, n*sizeof(int));
    hipMemcpy(d_a, &a, n*sizeof(int), hipMemcpyHostToDevice);
    hipMalloc((void **)&d_b, n*sizeof(int));
    hipMemcpy(d_b, &b, n*sizeof(int), hipMemcpyHostToDevice);

    startvalue = i;hipLaunchKernelGGL(( func1), dim3(blocks), dim3(threads), 0, 0, d_c,d_a,d_b,n,startvalue);
    hipDeviceSynchronize();

    hipMemcpy(&c, d_c, n*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_c);
    hipMemcpy(&a, d_a, n*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipMemcpy(&b, d_b, n*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_b);

    i=0;

    hipMalloc((void **)&d_c, n*sizeof(int));
    hipMemcpy(d_c, &c, n*sizeof(int), hipMemcpyHostToDevice);
    hipMalloc((void **)&d_a, n*sizeof(int));
    hipMemcpy(d_a, &a, n*sizeof(int), hipMemcpyHostToDevice);
    hipMalloc((void **)&d_b, n*sizeof(int));
    hipMemcpy(d_b, &b, n*sizeof(int), hipMemcpyHostToDevice);

    startvalue = i;hipLaunchKernelGGL(( func2), dim3(blocks), dim3(threads), 0, 0, d_c,d_a,d_b,n,startvalue);
    hipDeviceSynchronize();

    hipMemcpy(&c, d_c, n*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_c);
    hipMemcpy(&a, d_a, n*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipMemcpy(&b, d_b, n*sizeof(int), hipMemcpyDeviceToHost);
    hipFree(d_b);

    i=0;
    do{
        printf("c =%d\n",c[i]);
        i++;
    }while(i<n) ;
}
bb3f755114a390671c2e2d06796e0e0254270c1d.cu
#include<stdio.h>
#include<time.h>
#include<stdlib.h>

__global__ void func1(int *c,int *a,int *b,int n,int startvalue)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( (i < n && i >= startvalue ) || i == startvalue)
    {
        a[i] = i * 2;
        b[i] = i * 3;
        i++;
    }
}

__global__ void func2(int *c,int *a,int *b,int n,int startvalue)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( (i < n && i >= startvalue ) || i == startvalue)
    {
        c[i] = a[i] + b[i];
        i++;
    }
}

int main()
{
    int *d_c;
    int *d_a;
    int *d_b;
    int n=5,x;
    int a[n],b[n],c[n];
    int i ;
    i=0;
    int startvalue;
    int blocks = 1024;
    int threads= 1024;

    cudaMalloc((void **)&d_c, n*sizeof(int));
    cudaMemcpy(d_c, &c, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_a, n*sizeof(int));
    cudaMemcpy(d_a, &a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_b, n*sizeof(int));
    cudaMemcpy(d_b, &b, n*sizeof(int), cudaMemcpyHostToDevice);

    startvalue = i;
    func1<<<blocks, threads>>>(d_c,d_a,d_b,n,startvalue);
    cudaDeviceSynchronize();

    cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_c);
    cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_b);

    i=0;

    cudaMalloc((void **)&d_c, n*sizeof(int));
    cudaMemcpy(d_c, &c, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_a, n*sizeof(int));
    cudaMemcpy(d_a, &a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_b, n*sizeof(int));
    cudaMemcpy(d_b, &b, n*sizeof(int), cudaMemcpyHostToDevice);

    startvalue = i;
    func2<<<blocks, threads>>>(d_c,d_a,d_b,n,startvalue);
    cudaDeviceSynchronize();

    cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_c);
    cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_b);

    i=0;
    do{
        printf("c =%d\n",c[i]);
        i++;
    }while(i<n) ;
}
a750a92d671856d77e375afb229182ae706926e3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void HandleError( hipError_t err, const char *file, int line ) {
    if (err != hipSuccess) {
        printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

#define BLOCK_SIZE 4

typedef struct options {
    int loops;
    int size;
    char* input_file;
    char* output_file;
} program_options;

program_options options;

void parse_command_line_arguments(int argc, char* argv[]) {
    // default values for the optional file arguments
    options.input_file = NULL;
    options.output_file = NULL;

    for (int i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-l")) {
            options.loops = atoi(argv[i+1]);
        } else if (!strcmp(argv[i], "-n")) {
            options.size = atoi(argv[i+1]);
        } else if (!strcmp(argv[i], "-i")) {
            options.input_file = strdup(argv[i+1]);
        } else if (!strcmp(argv[i], "-o")) {
            options.output_file = strdup(argv[i+1]);
        }
    }
}

__device__ char get_cell_inbounds(int x, int y, int size, char* grid) {
    if (x >= 0 && x < size && y >= 0 && y < size) {
        return grid[x * size + y];
    }
    return '0';
}

__device__ int count_alive_neighbours(int x, int y, int size, char* grid) {
    int alive_neighbours = (get_cell_inbounds(x-1, y, size, grid) == '1')
        + (get_cell_inbounds(x+1, y, size, grid) == '1')
        + (get_cell_inbounds(x, y-1, size, grid) == '1')
        + (get_cell_inbounds(x, y+1, size, grid) == '1')
        + (get_cell_inbounds(x-1, y-1, size, grid) == '1')
        + (get_cell_inbounds(x-1, y+1, size, grid) == '1')
        + (get_cell_inbounds(x+1, y-1, size, grid) == '1')
        + (get_cell_inbounds(x+1, y+1, size, grid) == '1');
    return alive_neighbours;
}

__device__ void apply_game_rules(int index, char* cur_grid, char* next_grid, int alive_neighbours) {
    if (cur_grid[index] == '1') {
        // 0 or 1 neighbours -> the cell dies
        if (alive_neighbours < 2) {
            next_grid[index] = '0';
        }
        // 2 or 3 neighbours -> the cell survives
        else if (alive_neighbours < 4) {
            next_grid[index] = '1';
        }
        // more than 4 neighbours -> the cell dies due to overpopulation
        else {
            next_grid[index] = '0';
        }
    }
    // rules regarding dead cells
    else {
        // exactly 3 neighbours -> a new cell is born
        if (alive_neighbours == 3) {
            next_grid[index] = '1';
        } else {
            next_grid[index] = '0';
        }
    }
}

__global__ void evolution(char* cur_grid, char* next_grid, int size) {
    int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int y = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int index = x * size + y;

    int alive_neighbours = count_alive_neighbours(x, y, size, cur_grid);
    apply_game_rules(index, cur_grid, next_grid, alive_neighbours);
}

void read_input(const char* input_file, char* grid) {
    FILE *fp;
    int i = 0;
    char c;

    fp = fopen(input_file, "r");
    while ((c = (char) fgetc(fp)) != '\n') {
        grid[i++] = c;
    }
}

int main(int argc, char* argv[]) {
    char* h_grid;          // Grid on host
    char* d_grid;          // Grid on device
    char* d_next_gen_grid; // Next generation grid used on device
    char* d_tmp_grid;      // tmp grid pointer used to switch between grid and next_gen_grid

    float time;
    hipEvent_t start, stop;

    parse_command_line_arguments(argc, argv);
    size_t grid_bytes = options.size * options.size * sizeof(char);

    // Allocate memory for host grid
    h_grid = (char*) malloc(grid_bytes);

    // Allocate memory for device grids
    hipMalloc((void **)&d_grid, grid_bytes);
    hipMalloc((void **)&d_next_gen_grid, grid_bytes);

    // Read input file to host grid and copy over device grid
    read_input(options.input_file, h_grid);
    hipMemcpy(d_grid, h_grid, grid_bytes, hipMemcpyHostToDevice);

    // Define block size as well as number of blocks
    dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid_size(options.size / BLOCK_SIZE, options.size / BLOCK_SIZE);

    HANDLE_ERROR(hipEventCreate(&start));
    HANDLE_ERROR(hipEventCreate(&stop));
    HANDLE_ERROR(hipEventRecord(start, 0));

    for (int i = 0; i < options.loops; ++i) {
        hipLaunchKernelGGL(( evolution), dim3(grid_size), dim3(block_size), 0, 0, d_grid, d_next_gen_grid, options.size);
        d_tmp_grid = d_grid;
        d_grid = d_next_gen_grid;
        d_next_gen_grid = d_tmp_grid;
        hipMemcpy(h_grid, d_next_gen_grid, grid_bytes, hipMemcpyDeviceToHost);
    }

    // Copy results back to host grid
    hipMemcpy(h_grid, d_grid, grid_bytes, hipMemcpyDeviceToHost);

    HANDLE_ERROR(hipEventRecord(stop, 0));
    HANDLE_ERROR(hipEventSynchronize(stop));
    HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
    printf("Time elapsed: %3.1f ms\n", time);

    // Free resources
    hipFree(d_grid);
    hipFree(d_next_gen_grid);
    free(h_grid);

    if (options.input_file != NULL) {
        free(options.input_file);
    }
    if (options.output_file != NULL) {
        free(options.output_file);
    }

    return 0;
}
a750a92d671856d77e375afb229182ae706926e3.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

#define BLOCK_SIZE 4

typedef struct options {
    int loops;
    int size;
    char* input_file;
    char* output_file;
} program_options;

program_options options;

void parse_command_line_arguments(int argc, char* argv[]) {
    // default values for the optional file arguments
    options.input_file = NULL;
    options.output_file = NULL;

    for (int i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-l")) {
            options.loops = atoi(argv[i+1]);
        } else if (!strcmp(argv[i], "-n")) {
            options.size = atoi(argv[i+1]);
        } else if (!strcmp(argv[i], "-i")) {
            options.input_file = strdup(argv[i+1]);
        } else if (!strcmp(argv[i], "-o")) {
            options.output_file = strdup(argv[i+1]);
        }
    }
}

__device__ char get_cell_inbounds(int x, int y, int size, char* grid) {
    if (x >= 0 && x < size && y >= 0 && y < size) {
        return grid[x * size + y];
    }
    return '0';
}

__device__ int count_alive_neighbours(int x, int y, int size, char* grid) {
    int alive_neighbours = (get_cell_inbounds(x-1, y, size, grid) == '1')
        + (get_cell_inbounds(x+1, y, size, grid) == '1')
        + (get_cell_inbounds(x, y-1, size, grid) == '1')
        + (get_cell_inbounds(x, y+1, size, grid) == '1')
        + (get_cell_inbounds(x-1, y-1, size, grid) == '1')
        + (get_cell_inbounds(x-1, y+1, size, grid) == '1')
        + (get_cell_inbounds(x+1, y-1, size, grid) == '1')
        + (get_cell_inbounds(x+1, y+1, size, grid) == '1');
    return alive_neighbours;
}

__device__ void apply_game_rules(int index, char* cur_grid, char* next_grid, int alive_neighbours) {
    if (cur_grid[index] == '1') {
        // 0 or 1 neighbours -> the cell dies
        if (alive_neighbours < 2) {
            next_grid[index] = '0';
        }
        // 2 or 3 neighbours -> the cell survives
        else if (alive_neighbours < 4) {
            next_grid[index] = '1';
        }
        // more than 4 neighbours -> the cell dies due to overpopulation
        else {
            next_grid[index] = '0';
        }
    }
    // rules regarding dead cells
    else {
        // exactly 3 neighbours -> a new cell is born
        if (alive_neighbours == 3) {
            next_grid[index] = '1';
        } else {
            next_grid[index] = '0';
        }
    }
}

__global__ void evolution(char* cur_grid, char* next_grid, int size) {
    int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int y = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int index = x * size + y;

    int alive_neighbours = count_alive_neighbours(x, y, size, cur_grid);
    apply_game_rules(index, cur_grid, next_grid, alive_neighbours);
}

void read_input(const char* input_file, char* grid) {
    FILE *fp;
    int i = 0;
    char c;

    fp = fopen(input_file, "r");
    while ((c = (char) fgetc(fp)) != '\n') {
        grid[i++] = c;
    }
}

int main(int argc, char* argv[]) {
    char* h_grid;          // Grid on host
    char* d_grid;          // Grid on device
    char* d_next_gen_grid; // Next generation grid used on device
    char* d_tmp_grid;      // tmp grid pointer used to switch between grid and next_gen_grid

    float time;
    cudaEvent_t start, stop;

    parse_command_line_arguments(argc, argv);
    size_t grid_bytes = options.size * options.size * sizeof(char);

    // Allocate memory for host grid
    h_grid = (char*) malloc(grid_bytes);

    // Allocate memory for device grids
    cudaMalloc((void **)&d_grid, grid_bytes);
    cudaMalloc((void **)&d_next_gen_grid, grid_bytes);

    // Read input file to host grid and copy over device grid
    read_input(options.input_file, h_grid);
    cudaMemcpy(d_grid, h_grid, grid_bytes, cudaMemcpyHostToDevice);

    // Define block size as well as number of blocks
    dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid_size(options.size / BLOCK_SIZE, options.size / BLOCK_SIZE);

    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));
    HANDLE_ERROR(cudaEventRecord(start, 0));

    for (int i = 0; i < options.loops; ++i) {
        evolution<<<grid_size, block_size>>>(d_grid, d_next_gen_grid, options.size);
        d_tmp_grid = d_grid;
        d_grid = d_next_gen_grid;
        d_next_gen_grid = d_tmp_grid;
        cudaMemcpy(h_grid, d_next_gen_grid, grid_bytes, cudaMemcpyDeviceToHost);
    }

    // Copy results back to host grid
    cudaMemcpy(h_grid, d_grid, grid_bytes, cudaMemcpyDeviceToHost);

    HANDLE_ERROR(cudaEventRecord(stop, 0));
    HANDLE_ERROR(cudaEventSynchronize(stop));
    HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
    printf("Time elapsed: %3.1f ms\n", time);

    // Free resources
    cudaFree(d_grid);
    cudaFree(d_next_gen_grid);
    free(h_grid);

    if (options.input_file != NULL) {
        free(options.input_file);
    }
    if (options.output_file != NULL) {
        free(options.output_file);
    }

    return 0;
}
bc00af6e595f71fd51eed9d49eef05ee26b6d197.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/common/preprocessor.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/framework/dtype.h"
#include "oneflow/user/kernels/distributions/uniform_int_distribution.h"
#include "oneflow/user/kernels/distributions/distribution_template_util.cuh"
#include "oneflow/core/ep/include/device.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"

namespace oneflow {

template<typename T, typename ComputeType>
struct UniformIntTransformFunctor {
  UniformIntTransformFunctor(ComputeType low, ComputeType high) : low(low), high(high) {}
  __device__ T operator()(ComputeType rand_num) const {
    if (rand_num == 1.0) { rand_num = 0.0; }
    return static_cast<T>(static_cast<int64_t>(rand_num * (high - low) + low));
  }
  ComputeType low;
  ComputeType high;
};

template<typename T>
void UniformIntDistribution<DeviceType::kCUDA, T>::operator()(
    ep::Stream* stream, const int64_t elem_cnt, T* dptr,
    const std::shared_ptr<one::Generator>& generator) const {
  CHECK_GE(elem_cnt, 0);
  if (elem_cnt == 0) return;
  const auto device_index = stream->device()->device_index();
  auto gen = CHECK_JUST(generator->Get<ep::CUDAGenerator>(device_index));
  ep::CudaStream* cuda_stream = stream->As<ep::CudaStream>();
  auto execution_policy = gen->CalcExecutionPolicy(elem_cnt, cuda_stream);

  auto counter_offset = std::get<0>(execution_policy);
  auto grid = std::get<1>(execution_policy);
  auto block = std::get<2>(execution_policy);

  uint64_t seed = gen->current_seed();
  uint64_t offset = gen->get_philox_offset(counter_offset);

  using ComputeType = typename distribution::DefaultComputeType<T>::type;
  UniformIntTransformFunctor<T, ComputeType> transform_functor(low_, high_);
  if (std::is_same<T, double>::value) {
    DistributionFunctor<DistributionOp::kUniform2Double> dist_functor;
    hipLaunchKernelGGL(( DistributionElementwiseGridStrideKernel<T, ComputeType, 2, decltype(dist_functor), decltype(transform_functor)>)
        , dim3(grid), dim3(block), 0, stream->As<ep::CudaStream>()->cuda_stream(),
        elem_cnt, seed, offset, dptr, dist_functor, transform_functor);
  } else {
    DistributionFunctor<DistributionOp::kUniform4> dist_functor;
    hipLaunchKernelGGL(( DistributionElementwiseGridStrideKernel<T, ComputeType, 4, decltype(dist_functor), decltype(transform_functor)>)
        , dim3(grid), dim3(block), 0, stream->As<ep::CudaStream>()->cuda_stream(),
        elem_cnt, seed, offset, dptr, dist_functor, transform_functor);
  }
}

#define INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION(T, typeproto)              \
  template void UniformIntDistribution<DeviceType::kCUDA, T>::operator()( \
      ep::Stream* stream, const int64_t elem_cnt, T* dptr,                \
      const std::shared_ptr<one::Generator>& generator) const;

OF_PP_FOR_EACH_TUPLE(INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION, FLOATING_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION, INT_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION, UNSIGNED_INT_DATA_TYPE_SEQ)

} // namespace oneflow
bc00af6e595f71fd51eed9d49eef05ee26b6d197.cu
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/common/preprocessor.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/framework/dtype.h"
#include "oneflow/user/kernels/distributions/uniform_int_distribution.h"
#include "oneflow/user/kernels/distributions/distribution_template_util.cuh"
#include "oneflow/core/ep/include/device.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"

namespace oneflow {

template<typename T, typename ComputeType>
struct UniformIntTransformFunctor {
  UniformIntTransformFunctor(ComputeType low, ComputeType high) : low(low), high(high) {}
  __device__ T operator()(ComputeType rand_num) const {
    if (rand_num == 1.0) { rand_num = 0.0; }
    return static_cast<T>(static_cast<int64_t>(rand_num * (high - low) + low));
  }
  ComputeType low;
  ComputeType high;
};

template<typename T>
void UniformIntDistribution<DeviceType::kCUDA, T>::operator()(
    ep::Stream* stream, const int64_t elem_cnt, T* dptr,
    const std::shared_ptr<one::Generator>& generator) const {
  CHECK_GE(elem_cnt, 0);
  if (elem_cnt == 0) return;
  const auto device_index = stream->device()->device_index();
  auto gen = CHECK_JUST(generator->Get<ep::CUDAGenerator>(device_index));
  ep::CudaStream* cuda_stream = stream->As<ep::CudaStream>();
  auto execution_policy = gen->CalcExecutionPolicy(elem_cnt, cuda_stream);

  auto counter_offset = std::get<0>(execution_policy);
  auto grid = std::get<1>(execution_policy);
  auto block = std::get<2>(execution_policy);

  uint64_t seed = gen->current_seed();
  uint64_t offset = gen->get_philox_offset(counter_offset);

  using ComputeType = typename distribution::DefaultComputeType<T>::type;
  UniformIntTransformFunctor<T, ComputeType> transform_functor(low_, high_);
  if (std::is_same<T, double>::value) {
    DistributionFunctor<DistributionOp::kUniform2Double> dist_functor;
    DistributionElementwiseGridStrideKernel<T, ComputeType, 2, decltype(dist_functor),
                                            decltype(transform_functor)>
        <<<grid, block, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
            elem_cnt, seed, offset, dptr, dist_functor, transform_functor);
  } else {
    DistributionFunctor<DistributionOp::kUniform4> dist_functor;
    DistributionElementwiseGridStrideKernel<T, ComputeType, 4, decltype(dist_functor),
                                            decltype(transform_functor)>
        <<<grid, block, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>(
            elem_cnt, seed, offset, dptr, dist_functor, transform_functor);
  }
}

#define INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION(T, typeproto)              \
  template void UniformIntDistribution<DeviceType::kCUDA, T>::operator()( \
      ep::Stream* stream, const int64_t elem_cnt, T* dptr,                \
      const std::shared_ptr<one::Generator>& generator) const;

OF_PP_FOR_EACH_TUPLE(INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION, FLOATING_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION, INT_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(INITIATE_CUDA_UNIFORM_INT_DISTRIBUTION, UNSIGNED_INT_DATA_TYPE_SEQ)

} // namespace oneflow
24755245ab34874536920df8fad957c4c3af017a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda.h> #define row1 10 /* Number of rows of first matrix */ #define col1 10 /* Number of columns of first matrix */ #define row2 10 /* Number of rows of second matrix */ #define col2 10 /* Number of columns of second matrix */ typedef long long int LLI; __global__ void matproductsharedmemory(LLI *l,LLI *m, LLI *n) { LLI x=blockIdx.x; LLI y=blockIdx.y; __shared__ LLI p[col1]; LLI i; LLI k=threadIdx.x; n[col2*y+x]=0; p[k]=l[col1*y+k]*m[col2*k+x]; __syncthreads(); for(i=0;i<col1;i++) n[col2*y+x]=n[col2*y+x]+p[i]; } int main() { LLI a[row1][col1]; LLI b[row2][col2]; LLI c[row1][col2]; LLI *d,*e,*f; LLI i,j; // prLLIf("\n Enter elements of first matrix of size 2*3\n"); for(i=0;i<row1;i++) { for(j=0;j<col1;j++) { a[i][j]= i*row1+j; } } // prLLIf("\n Enter elements of second matrix of size 3*2\n"); for(i=0;i<row2;i++) { for(j=0;j<col2;j++) { b[i][j]=i*row2+j; } } hipMalloc((void **)&d,row1*col1*sizeof(LLI)); hipMalloc((void **)&e,row2*col2*sizeof(LLI)); hipMalloc((void **)&f,row1*col2*sizeof(LLI)); hipMemcpy(d,a,row1*col1*sizeof(LLI),hipMemcpyHostToDevice); hipMemcpy(e,b,row2*col2*sizeof(LLI),hipMemcpyHostToDevice); dim3 grid(col2,row1); /* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */ hipLaunchKernelGGL(( matproductsharedmemory), dim3(grid),dim3(col1), 0, 0, d,e,f); hipMemcpy(c,f,row1*col2*sizeof(LLI),hipMemcpyDeviceToHost); printf("\n Product of two matrices:\n "); for(i=0;i<row1;i++) { for(j=0;j<col2;j++) { printf("%Ld\t",c[i][j]); } printf("\n"); } hipFree(d); hipFree(e); hipFree(f); return 0; } /* OUTPUT profile ==13282== NVPROF is profiling process 13282, command: ./a.out Product of two matrices: 32835000 32839950 32844900 32849850 32854800 32859750 32864700 32869650 32874600 32879550 32884500 32889450 32894400 32899350 32904300 32909250 32914200 32919150 32924100 32929050 32934000 32938950 32943900 32948850 32953800 32958750 32963700 32968650 32973600 32978550 32983500 32988450 32993400 32998350 33003300 33008250 33013200 33018150 33023100 33028050 33033000 33037950 33042900 33047850 33052800 33057750 33062700 33067650 33072600 33077550 33082500 33087450 33092400 33097350 33102300 33107250 33112200 33117150 33122100 33127050 33132000 33136950 33141900 33146850 33151800 33156750 33161700 33166650 33171600 33176550 33181500 33186450 33191400 33196350 33201300 33206250 33211200 33216150 33221100 33226050 33231000 33235950 33240900 33245850 33250800 33255750 33260700 33265650 33270600 33275550 33280500 33285450 33290400 33295350 33300300 33305250 33310200 33315150 33320100 33325050 82335000 82349950 82364900 82379850 82394800 82409750 82424700 82439650 82454600 82469550 82484500 82499450 82514400 82529350 82544300 82559250 82574200 82589150 82604100 82619050 82634000 82648950 82663900 82678850 82693800 82708750 82723700 82738650 82753600 82768550 82783500 82798450 82813400 82828350 82843300 82858250 82873200 82888150 82903100 82918050 82933000 82947950 82962900 82977850 82992800 83007750 83022700 83037650 83052600 83067550 83082500 83097450 83112400 83127350 83142300 83157250 83172200 83187150 83202100 83217050 83232000 83246950 83261900 83276850 83291800 83306750 83321700 83336650 83351600 83366550 83381500 83396450 83411400 83426350 83441300 83456250 83471200 83486150 83501100 83516050 83531000 83545950 83560900 83575850 83590800 83605750 83620700 83635650 83650600 83665550 83680500 83695450 
3260815350 3261460300 3262105250 3262750200 3263395150 3264040100 3264685050 3250335000 3250989950 3251644900 3252299850 3252954800 3253609750 3254264700 3254919650 3255574600 3256229550 3256884500 3257539450 3258194400 3258849350 3259504300 3260159250 3260814200 3261469150 3262124100 3262779050 3263434000 3264088950 3264743900 3265398850 3266053800 3266708750 3267363700 3268018650 3268673600 3269328550 3269983500 3270638450 3271293400 3271948350 3272603300 3273258250 3273913200 3274568150 3275223100 3275878050 3276533000 3277187950 3277842900 3278497850 3279152800 3279807750 3280462700 3281117650 3281772600 3282427550 3283082500 3283737450 3284392400 3285047350 3285702300 3286357250 3287012200 3287667150 3288322100 3288977050 3289632000 3290286950 3290941900 3291596850 3292251800 3292906750 3293561700 3294216650 3294871600 3295526550 3296181500 3296836450 3297491400 3298146350 3298801300 3299456250 3300111200 3300766150 3301421100 3302076050 3302731000 3303385950 3304040900 3304695850 3305350800 3306005750 3306660700 3307315650 3307970600 3308625550 3309280500 3309935450 3310590400 3311245350 3311900300 3312555250 3313210200 3313865150 3314520100 3315175050 3299835000 3300499950 3301164900 3301829850 3302494800 3303159750 3303824700 3304489650 3305154600 3305819550 3306484500 3307149450 3307814400 3308479350 3309144300 3309809250 3310474200 3311139150 3311804100 3312469050 3313134000 3313798950 3314463900 3315128850 3315793800 3316458750 3317123700 3317788650 3318453600 3319118550 3319783500 3320448450 3321113400 3321778350 3322443300 3323108250 3323773200 3324438150 3325103100 3325768050 3326433000 3327097950 3327762900 3328427850 3329092800 3329757750 3330422700 3331087650 3331752600 3332417550 3333082500 3333747450 3334412400 3335077350 3335742300 3336407250 3337072200 3337737150 3338402100 3339067050 3339732000 3340396950 3341061900 3341726850 3342391800 3343056750 3343721700 3344386650 3345051600 3345716550 3346381500 3347046450 3347711400 3348376350 3349041300 3349706250 3350371200 3351036150 3351701100 3352366050 3353031000 3353695950 3354360900 3355025850 3355690800 3356355750 3357020700 3357685650 3358350600 3359015550 3359680500 3360345450 3361010400 3361675350 3362340300 3363005250 3363670200 3364335150 3365000100 3365665050 3349335000 3350009950 3350684900 3351359850 3352034800 3352709750 3353384700 3354059650 3354734600 3355409550 3356084500 3356759450 3357434400 3358109350 3358784300 3359459250 3360134200 3360809150 3361484100 3362159050 3362834000 3363508950 3364183900 3364858850 3365533800 3366208750 3366883700 3367558650 3368233600 3368908550 3369583500 3370258450 3370933400 3371608350 3372283300 3372958250 3373633200 3374308150 3374983100 3375658050 3376333000 3377007950 3377682900 3378357850 3379032800 3379707750 3380382700 3381057650 3381732600 3382407550 3383082500 3383757450 3384432400 3385107350 3385782300 3386457250 3387132200 3387807150 3388482100 3389157050 3389832000 3390506950 3391181900 3391856850 3392531800 3393206750 3393881700 3394556650 3395231600 3395906550 3396581500 3397256450 3397931400 3398606350 3399281300 3399956250 3400631200 3401306150 3401981100 3402656050 3403331000 3404005950 3404680900 3405355850 3406030800 3406705750 3407380700 3408055650 3408730600 3409405550 3410080500 3410755450 3411430400 3412105350 3412780300 3413455250 3414130200 3414805150 3415480100 3416155050 3398835000 3399519950 3400204900 3400889850 3401574800 3402259750 3402944700 3403629650 3404314600 3404999550 3405684500 3406369450 3407054400 3407739350 3408424300 3409109250 
3409794200 3410479150 3411164100 3411849050 3412534000 3413218950 3413903900 3414588850 3415273800 3415958750 3416643700 3417328650 3418013600 3418698550 3419383500 3420068450 3420753400 3421438350 3422123300 3422808250 3423493200 3424178150 3424863100 3425548050 3426233000 3426917950 3427602900 3428287850 3428972800 3429657750 3430342700 3431027650 3431712600 3432397550 3433082500 3433767450 3434452400 3435137350 3435822300 3436507250 3437192200 3437877150 3438562100 3439247050 3439932000 3440616950 3441301900 3441986850 3442671800 3443356750 3444041700 3444726650 3445411600 3446096550 3446781500 3447466450 3448151400 3448836350 3449521300 3450206250 3450891200 3451576150 3452261100 3452946050 3453631000 3454315950 3455000900 3455685850 3456370800 3457055750 3457740700 3458425650 3459110600 3459795550 3460480500 3461165450 3461850400 3462535350 3463220300 3463905250 3464590200 3465275150 3465960100 3466645050 3448335000 3449029950 3449724900 3450419850 3451114800 3451809750 3452504700 3453199650 3453894600 3454589550 3455284500 3455979450 3456674400 3457369350 3458064300 3458759250 3459454200 3460149150 3460844100 3461539050 3462234000 3462928950 3463623900 3464318850 3465013800 3465708750 3466403700 3467098650 3467793600 3468488550 3469183500 3469878450 3470573400 3471268350 3471963300 3472658250 3473353200 3474048150 3474743100 3475438050 3476133000 3476827950 3477522900 3478217850 3478912800 3479607750 3480302700 3480997650 3481692600 3482387550 3483082500 3483777450 3484472400 3485167350 3485862300 3486557250 3487252200 3487947150 3488642100 3489337050 3490032000 3490726950 3491421900 3492116850 3492811800 3493506750 3494201700 3494896650 3495591600 3496286550 3496981500 3497676450 3498371400 3499066350 3499761300 3500456250 3501151200 3501846150 3502541100 3503236050 3503931000 3504625950 3505320900 3506015850 3506710800 3507405750 3508100700 3508795650 3509490600 3510185550 3510880500 3511575450 3512270400 3512965350 3513660300 3514355250 3515050200 3515745150 3516440100 3517135050 3497835000 3498539950 3499244900 3499949850 3500654800 3501359750 3502064700 3502769650 3503474600 3504179550 3504884500 3505589450 3506294400 3506999350 3507704300 3508409250 3509114200 3509819150 3510524100 3511229050 3511934000 3512638950 3513343900 3514048850 3514753800 3515458750 3516163700 3516868650 3517573600 3518278550 3518983500 3519688450 3520393400 3521098350 3521803300 3522508250 3523213200 3523918150 3524623100 3525328050 3526033000 3526737950 3527442900 3528147850 3528852800 3529557750 3530262700 3530967650 3531672600 3532377550 3533082500 3533787450 3534492400 3535197350 3535902300 3536607250 3537312200 3538017150 3538722100 3539427050 3540132000 3540836950 3541541900 3542246850 3542951800 3543656750 3544361700 3545066650 3545771600 3546476550 3547181500 3547886450 3548591400 3549296350 3550001300 3550706250 3551411200 3552116150 3552821100 3553526050 3554231000 3554935950 3555640900 3556345850 3557050800 3557755750 3558460700 3559165650 3559870600 3560575550 3561280500 3561985450 3562690400 3563395350 3564100300 3564805250 3565510200 3566215150 3566920100 3567625050 3547335000 3548049950 3548764900 3549479850 3550194800 3550909750 3551624700 3552339650 3553054600 3553769550 3554484500 3555199450 3555914400 3556629350 3557344300 3558059250 3558774200 3559489150 3560204100 3560919050 3561634000 3562348950 3563063900 3563778850 3564493800 3565208750 3565923700 3566638650 3567353600 3568068550 3568783500 3569498450 3570213400 3570928350 3571643300 3572358250 3573073200 3573788150 3574503100 
3575218050 3575933000 3576647950 3577362900 3578077850 3578792800 3579507750 3580222700 3580937650 3581652600 3582367550 3583082500 3583797450 3584512400 3585227350 3585942300 3586657250 3587372200 3588087150 3588802100 3589517050 3590232000 3590946950 3591661900 3592376850 3593091800 3593806750 3594521700 3595236650 3595951600 3596666550 3597381500 3598096450 3598811400 3599526350 3600241300 3600956250 3601671200 3602386150 3603101100 3603816050 3604531000 3605245950 3605960900 3606675850 3607390800 3608105750 3608820700 3609535650 3610250600 3610965550 3611680500 3612395450 3613110400 3613825350 3614540300 3615255250 3615970200 3616685150 3617400100 3618115050 3596835000 3597559950 3598284900 3599009850 3599734800 3600459750 3601184700 3601909650 3602634600 3603359550 3604084500 3604809450 3605534400 3606259350 3606984300 3607709250 3608434200 3609159150 3609884100 3610609050 3611334000 3612058950 3612783900 3613508850 3614233800 3614958750 3615683700 3616408650 3617133600 3617858550 3618583500 3619308450 3620033400 3620758350 3621483300 3622208250 3622933200 3623658150 3624383100 3625108050 3625833000 3626557950 3627282900 3628007850 3628732800 3629457750 3630182700 3630907650 3631632600 3632357550 3633082500 3633807450 3634532400 3635257350 3635982300 3636707250 3637432200 3638157150 3638882100 3639607050 3640332000 3641056950 3641781900 3642506850 3643231800 3643956750 3644681700 3645406650 3646131600 3646856550 3647581500 3648306450 3649031400 3649756350 3650481300 3651206250 3651931200 3652656150 3653381100 3654106050 3654831000 3655555950 3656280900 3657005850 3657730800 3658455750 3659180700 3659905650 3660630600 3661355550 3662080500 3662805450 3663530400 3664255350 3664980300 3665705250 3666430200 3667155150 3667880100 3668605050 3646335000 3647069950 3647804900 3648539850 3649274800 3650009750 3650744700 3651479650 3652214600 3652949550 3653684500 3654419450 3655154400 3655889350 3656624300 3657359250 3658094200 3658829150 3659564100 3660299050 3661034000 3661768950 3662503900 3663238850 3663973800 3664708750 3665443700 3666178650 3666913600 3667648550 3668383500 3669118450 3669853400 3670588350 3671323300 3672058250 3672793200 3673528150 3674263100 3674998050 3675733000 3676467950 3677202900 3677937850 3678672800 3679407750 3680142700 3680877650 3681612600 3682347550 3683082500 3683817450 3684552400 3685287350 3686022300 3686757250 3687492200 3688227150 3688962100 3689697050 3690432000 3691166950 3691901900 3692636850 3693371800 3694106750 3694841700 3695576650 3696311600 3697046550 3697781500 3698516450 3699251400 3699986350 3700721300 3701456250 3702191200 3702926150 3703661100 3704396050 3705131000 3705865950 3706600900 3707335850 3708070800 3708805750 3709540700 3710275650 3711010600 3711745550 3712480500 3713215450 3713950400 3714685350 3715420300 3716155250 3716890200 3717625150 3718360100 3719095050 3695835000 3696579950 3697324900 3698069850 3698814800 3699559750 3700304700 3701049650 3701794600 3702539550 3703284500 3704029450 3704774400 3705519350 3706264300 3707009250 3707754200 3708499150 3709244100 3709989050 3710734000 3711478950 3712223900 3712968850 3713713800 3714458750 3715203700 3715948650 3716693600 3717438550 3718183500 3718928450 3719673400 3720418350 3721163300 3721908250 3722653200 3723398150 3724143100 3724888050 3725633000 3726377950 3727122900 3727867850 3728612800 3729357750 3730102700 3730847650 3731592600 3732337550 3733082500 3733827450 3734572400 3735317350 3736062300 3736807250 3737552200 3738297150 3739042100 3739787050 3740532000 3741276950 
3742021900 3742766850 3743511800 3744256750 3745001700 3745746650 3746491600 3747236550 3747981500 3748726450 3749471400 3750216350 3750961300 3751706250 3752451200 3753196150 3753941100 3754686050 3755431000 3756175950 3756920900 3757665850 3758410800 3759155750 3759900700 3760645650 3761390600 3762135550 3762880500 3763625450 3764370400 3765115350 3765860300 3766605250 3767350200 3768095150 3768840100 3769585050 3745335000 3746089950 3746844900 3747599850 3748354800 3749109750 3749864700 3750619650 3751374600 3752129550 3752884500 3753639450 3754394400 3755149350 3755904300 3756659250 3757414200 3758169150 3758924100 3759679050 3760434000 3761188950 3761943900 3762698850 3763453800 3764208750 3764963700 3765718650 3766473600 3767228550 3767983500 3768738450 3769493400 3770248350 3771003300 3771758250 3772513200 3773268150 3774023100 3774778050 3775533000 3776287950 3777042900 3777797850 3778552800 3779307750 3780062700 3780817650 3781572600 3782327550 3783082500 3783837450 3784592400 3785347350 3786102300 3786857250 3787612200 3788367150 3789122100 3789877050 3790632000 3791386950 3792141900 3792896850 3793651800 3794406750 3795161700 3795916650 3796671600 3797426550 3798181500 3798936450 3799691400 3800446350 3801201300 3801956250 3802711200 3803466150 3804221100 3804976050 3805731000 3806485950 3807240900 3807995850 3808750800 3809505750 3810260700 3811015650 3811770600 3812525550 3813280500 3814035450 3814790400 3815545350 3816300300 3817055250 3817810200 3818565150 3819320100 3820075050 3794835000 3795599950 3796364900 3797129850 3797894800 3798659750 3799424700 3800189650 3800954600 3801719550 3802484500 3803249450 3804014400 3804779350 3805544300 3806309250 3807074200 3807839150 3808604100 3809369050 3810134000 3810898950 3811663900 3812428850 3813193800 3813958750 3814723700 3815488650 3816253600 3817018550 3817783500 3818548450 3819313400 3820078350 3820843300 3821608250 3822373200 3823138150 3823903100 3824668050 3825433000 3826197950 3826962900 3827727850 3828492800 3829257750 3830022700 3830787650 3831552600 3832317550 3833082500 3833847450 3834612400 3835377350 3836142300 3836907250 3837672200 3838437150 3839202100 3839967050 3840732000 3841496950 3842261900 3843026850 3843791800 3844556750 3845321700 3846086650 3846851600 3847616550 3848381500 3849146450 3849911400 3850676350 3851441300 3852206250 3852971200 3853736150 3854501100 3855266050 3856031000 3856795950 3857560900 3858325850 3859090800 3859855750 3860620700 3861385650 3862150600 3862915550 3863680500 3864445450 3865210400 3865975350 3866740300 3867505250 3868270200 3869035150 3869800100 3870565050 3844335000 3845109950 3845884900 3846659850 3847434800 3848209750 3848984700 3849759650 3850534600 3851309550 3852084500 3852859450 3853634400 3854409350 3855184300 3855959250 3856734200 3857509150 3858284100 3859059050 3859834000 3860608950 3861383900 3862158850 3862933800 3863708750 3864483700 3865258650 3866033600 3866808550 3867583500 3868358450 3869133400 3869908350 3870683300 3871458250 3872233200 3873008150 3873783100 3874558050 3875333000 3876107950 3876882900 3877657850 3878432800 3879207750 3879982700 3880757650 3881532600 3882307550 3883082500 3883857450 3884632400 3885407350 3886182300 3886957250 3887732200 3888507150 3889282100 3890057050 3890832000 3891606950 3892381900 3893156850 3893931800 3894706750 3895481700 3896256650 3897031600 3897806550 3898581500 3899356450 3900131400 3900906350 3901681300 3902456250 3903231200 3904006150 3904781100 3905556050 3906331000 3907105950 3907880900 3908655850 3909430800 
3910205750 3910980700 3911755650 3912530600 3913305550 3914080500 3914855450 3915630400 3916405350 3917180300 3917955250 3918730200 3919505150 3920280100 3921055050 3893835000 3894619950 3895404900 3896189850 3896974800 3897759750 3898544700 3899329650 3900114600 3900899550 3901684500 3902469450 3903254400 3904039350 3904824300 3905609250 3906394200 3907179150 3907964100 3908749050 3909534000 3910318950 3911103900 3911888850 3912673800 3913458750 3914243700 3915028650 3915813600 3916598550 3917383500 3918168450 3918953400 3919738350 3920523300 3921308250 3922093200 3922878150 3923663100 3924448050 3925233000 3926017950 3926802900 3927587850 3928372800 3929157750 3929942700 3930727650 3931512600 3932297550 3933082500 3933867450 3934652400 3935437350 3936222300 3937007250 3937792200 3938577150 3939362100 3940147050 3940932000 3941716950 3942501900 3943286850 3944071800 3944856750 3945641700 3946426650 3947211600 3947996550 3948781500 3949566450 3950351400 3951136350 3951921300 3952706250 3953491200 3954276150 3955061100 3955846050 3956631000 3957415950 3958200900 3958985850 3959770800 3960555750 3961340700 3962125650 3962910600 3963695550 3964480500 3965265450 3966050400 3966835350 3967620300 3968405250 3969190200 3969975150 3970760100 3971545050 3943335000 3944129950 3944924900 3945719850 3946514800 3947309750 3948104700 3948899650 3949694600 3950489550 3951284500 3952079450 3952874400 3953669350 3954464300 3955259250 3956054200 3956849150 3957644100 3958439050 3959234000 3960028950 3960823900 3961618850 3962413800 3963208750 3964003700 3964798650 3965593600 3966388550 3967183500 3967978450 3968773400 3969568350 3970363300 3971158250 3971953200 3972748150 3973543100 3974338050 3975133000 3975927950 3976722900 3977517850 3978312800 3979107750 3979902700 3980697650 3981492600 3982287550 3983082500 3983877450 3984672400 3985467350 3986262300 3987057250 3987852200 3988647150 3989442100 3990237050 3991032000 3991826950 3992621900 3993416850 3994211800 3995006750 3995801700 3996596650 3997391600 3998186550 3998981500 3999776450 4000571400 4001366350 4002161300 4002956250 4003751200 4004546150 4005341100 4006136050 4006931000 4007725950 4008520900 4009315850 4010110800 4010905750 4011700700 4012495650 4013290600 4014085550 4014880500 4015675450 4016470400 4017265350 4018060300 4018855250 4019650200 4020445150 4021240100 4022035050 3992835000 3993639950 3994444900 3995249850 3996054800 3996859750 3997664700 3998469650 3999274600 4000079550 4000884500 4001689450 4002494400 4003299350 4004104300 4004909250 4005714200 4006519150 4007324100 4008129050 4008934000 4009738950 4010543900 4011348850 4012153800 4012958750 4013763700 4014568650 4015373600 4016178550 4016983500 4017788450 4018593400 4019398350 4020203300 4021008250 4021813200 4022618150 4023423100 4024228050 4025033000 4025837950 4026642900 4027447850 4028252800 4029057750 4029862700 4030667650 4031472600 4032277550 4033082500 4033887450 4034692400 4035497350 4036302300 4037107250 4037912200 4038717150 4039522100 4040327050 4041132000 4041936950 4042741900 4043546850 4044351800 4045156750 4045961700 4046766650 4047571600 4048376550 4049181500 4049986450 4050791400 4051596350 4052401300 4053206250 4054011200 4054816150 4055621100 4056426050 4057231000 4058035950 4058840900 4059645850 4060450800 4061255750 4062060700 4062865650 4063670600 4064475550 4065280500 4066085450 4066890400 4067695350 4068500300 4069305250 4070110200 4070915150 4071720100 4072525050 4042335000 4043149950 4043964900 4044779850 4045594800 4046409750 4047224700 4048039650 
4048854600 4049669550 4050484500 4051299450 4052114400 4052929350 4053744300 4054559250 4055374200 4056189150 4057004100 4057819050 4058634000 4059448950 4060263900 4061078850 4061893800 4062708750 4063523700 4064338650 4065153600 4065968550 4066783500 4067598450 4068413400 4069228350 4070043300 4070858250 4071673200 4072488150 4073303100 4074118050 4074933000 4075747950 4076562900 4077377850 4078192800 4079007750 4079822700 4080637650 4081452600 4082267550 4083082500 4083897450 4084712400 4085527350 4086342300 4087157250 4087972200 4088787150 4089602100 4090417050 4091232000 4092046950 4092861900 4093676850 4094491800 4095306750 4096121700 4096936650 4097751600 4098566550 4099381500 4100196450 4101011400 4101826350 4102641300 4103456250 4104271200 4105086150 4105901100 4106716050 4107531000 4108345950 4109160900 4109975850 4110790800 4111605750 4112420700 4113235650 4114050600 4114865550 4115680500 4116495450 4117310400 4118125350 4118940300 4119755250 4120570200 4121385150 4122200100 4123015050 4091835000 4092659950 4093484900 4094309850 4095134800 4095959750 4096784700 4097609650 4098434600 4099259550 4100084500 4100909450 4101734400 4102559350 4103384300 4104209250 4105034200 4105859150 4106684100 4107509050 4108334000 4109158950 4109983900 4110808850 4111633800 4112458750 4113283700 4114108650 4114933600 4115758550 4116583500 4117408450 4118233400 4119058350 4119883300 4120708250 4121533200 4122358150 4123183100 4124008050 4124833000 4125657950 4126482900 4127307850 4128132800 4128957750 4129782700 4130607650 4131432600 4132257550 4133082500 4133907450 4134732400 4135557350 4136382300 4137207250 4138032200 4138857150 4139682100 4140507050 4141332000 4142156950 4142981900 4143806850 4144631800 4145456750 4146281700 4147106650 4147931600 4148756550 4149581500 4150406450 4151231400 4152056350 4152881300 4153706250 4154531200 4155356150 4156181100 4157006050 4157831000 4158655950 4159480900 4160305850 4161130800 4161955750 4162780700 4163605650 4164430600 4165255550 4166080500 4166905450 4167730400 4168555350 4169380300 4170205250 4171030200 4171855150 4172680100 4173505050 4141335000 4142169950 4143004900 4143839850 4144674800 4145509750 4146344700 4147179650 4148014600 4148849550 4149684500 4150519450 4151354400 4152189350 4153024300 4153859250 4154694200 4155529150 4156364100 4157199050 4158034000 4158868950 4159703900 4160538850 4161373800 4162208750 4163043700 4163878650 4164713600 4165548550 4166383500 4167218450 4168053400 4168888350 4169723300 4170558250 4171393200 4172228150 4173063100 4173898050 4174733000 4175567950 4176402900 4177237850 4178072800 4178907750 4179742700 4180577650 4181412600 4182247550 4183082500 4183917450 4184752400 4185587350 4186422300 4187257250 4188092200 4188927150 4189762100 4190597050 4191432000 4192266950 4193101900 4193936850 4194771800 4195606750 4196441700 4197276650 4198111600 4198946550 4199781500 4200616450 4201451400 4202286350 4203121300 4203956250 4204791200 4205626150 4206461100 4207296050 4208131000 4208965950 4209800900 4210635850 4211470800 4212305750 4213140700 4213975650 4214810600 4215645550 4216480500 4217315450 4218150400 4218985350 4219820300 4220655250 4221490200 4222325150 4223160100 4223995050 4190835000 4191679950 4192524900 4193369850 4194214800 4195059750 4195904700 4196749650 4197594600 4198439550 4199284500 4200129450 4200974400 4201819350 4202664300 4203509250 4204354200 4205199150 4206044100 4206889050 4207734000 4208578950 4209423900 4210268850 4211113800 4211958750 4212803700 4213648650 4214493600 4215338550 4216183500 
4217028450 4217873400 4218718350 4219563300 4220408250 4221253200 4222098150 4222943100 4223788050 4224633000 4225477950 4226322900 4227167850 4228012800 4228857750 4229702700 4230547650 4231392600 4232237550 4233082500 4233927450 4234772400 4235617350 4236462300 4237307250 4238152200 4238997150 4239842100 4240687050 4241532000 4242376950 4243221900 4244066850 4244911800 4245756750 4246601700 4247446650 4248291600 4249136550 4249981500 4250826450 4251671400 4252516350 4253361300 4254206250 4255051200 4255896150 4256741100 4257586050 4258431000 4259275950 4260120900 4260965850 4261810800 4262655750 4263500700 4264345650 4265190600 4266035550 4266880500 4267725450 4268570400 4269415350 4270260300 4271105250 4271950200 4272795150 4273640100 4274485050 4240335000 4241189950 4242044900 4242899850 4243754800 4244609750 4245464700 4246319650 4247174600 4248029550 4248884500 4249739450 4250594400 4251449350 4252304300 4253159250 4254014200 4254869150 4255724100 4256579050 4257434000 4258288950 4259143900 4259998850 4260853800 4261708750 4262563700 4263418650 4264273600 4265128550 4265983500 4266838450 4267693400 4268548350 4269403300 4270258250 4271113200 4271968150 4272823100 4273678050 4274533000 4275387950 4276242900 4277097850 4277952800 4278807750 4279662700 4280517650 4281372600 4282227550 4283082500 4283937450 4284792400 4285647350 4286502300 4287357250 4288212200 4289067150 4289922100 4290777050 4291632000 4292486950 4293341900 4294196850 4295051800 4295906750 4296761700 4297616650 4298471600 4299326550 4300181500 4301036450 4301891400 4302746350 4303601300 4304456250 4305311200 4306166150 4307021100 4307876050 4308731000 4309585950 4310440900 4311295850 4312150800 4313005750 4313860700 4314715650 4315570600 4316425550 4317280500 4318135450 4318990400 4319845350 4320700300 4321555250 4322410200 4323265150 4324120100 4324975050 4289835000 4290699950 4291564900 4292429850 4293294800 4294159750 4295024700 4295889650 4296754600 4297619550 4298484500 4299349450 4300214400 4301079350 4301944300 4302809250 4303674200 4304539150 4305404100 4306269050 4307134000 4307998950 4308863900 4309728850 4310593800 4311458750 4312323700 4313188650 4314053600 4314918550 4315783500 4316648450 4317513400 4318378350 4319243300 4320108250 4320973200 4321838150 4322703100 4323568050 4324433000 4325297950 4326162900 4327027850 4327892800 4328757750 4329622700 4330487650 4331352600 4332217550 4333082500 4333947450 4334812400 4335677350 4336542300 4337407250 4338272200 4339137150 4340002100 4340867050 4341732000 4342596950 4343461900 4344326850 4345191800 4346056750 4346921700 4347786650 4348651600 4349516550 4350381500 4351246450 4352111400 4352976350 4353841300 4354706250 4355571200 4356436150 4357301100 4358166050 4359031000 4359895950 4360760900 4361625850 4362490800 4363355750 4364220700 4365085650 4365950600 4366815550 4367680500 4368545450 4369410400 4370275350 4371140300 4372005250 4372870200 4373735150 4374600100 4375465050 4339335000 4340209950 4341084900 4341959850 4342834800 4343709750 4344584700 4345459650 4346334600 4347209550 4348084500 4348959450 4349834400 4350709350 4351584300 4352459250 4353334200 4354209150 4355084100 4355959050 4356834000 4357708950 4358583900 4359458850 4360333800 4361208750 4362083700 4362958650 4363833600 4364708550 4365583500 4366458450 4367333400 4368208350 4369083300 4369958250 4370833200 4371708150 4372583100 4373458050 4374333000 4375207950 4376082900 4376957850 4377832800 4378707750 4379582700 4380457650 4381332600 4382207550 4383082500 4383957450 4384832400 4385707350 
4386582300 4387457250 4388332200 4389207150 4390082100 4390957050 4391832000 4392706950 4393581900 4394456850 4395331800 4396206750 4397081700 4397956650 4398831600 4399706550 4400581500 4401456450 4402331400 4403206350 4404081300 4404956250 4405831200 4406706150 4407581100 4408456050 4409331000 4410205950 4411080900 4411955850 4412830800 4413705750 4414580700 4415455650 4416330600 4417205550 4418080500 4418955450 4419830400 4420705350 4421580300 4422455250 4423330200 4424205150 4425080100 4425955050 4388835000 4389719950 4390604900 4391489850 4392374800 4393259750 4394144700 4395029650 4395914600 4396799550 4397684500 4398569450 4399454400 4400339350 4401224300 4402109250 4402994200 4403879150 4404764100 4405649050 4406534000 4407418950 4408303900 4409188850 4410073800 4410958750 4411843700 4412728650 4413613600 4414498550 4415383500 4416268450 4417153400 4418038350 4418923300 4419808250 4420693200 4421578150 4422463100 4423348050 4424233000 4425117950 4426002900 4426887850 4427772800 4428657750 4429542700 4430427650 4431312600 4432197550 4433082500 4433967450 4434852400 4435737350 4436622300 4437507250 4438392200 4439277150 4440162100 4441047050 4441932000 4442816950 4443701900 4444586850 4445471800 4446356750 4447241700 4448126650 4449011600 4449896550 4450781500 4451666450 4452551400 4453436350 4454321300 4455206250 4456091200 4456976150 4457861100 4458746050 4459631000 4460515950 4461400900 4462285850 4463170800 4464055750 4464940700 4465825650 4466710600 4467595550 4468480500 4469365450 4470250400 4471135350 4472020300 4472905250 4473790200 4474675150 4475560100 4476445050 4438335000 4439229950 4440124900 4441019850 4441914800 4442809750 4443704700 4444599650 4445494600 4446389550 4447284500 4448179450 4449074400 4449969350 4450864300 4451759250 4452654200 4453549150 4454444100 4455339050 4456234000 4457128950 4458023900 4458918850 4459813800 4460708750 4461603700 4462498650 4463393600 4464288550 4465183500 4466078450 4466973400 4467868350 4468763300 4469658250 4470553200 4471448150 4472343100 4473238050 4474133000 4475027950 4475922900 4476817850 4477712800 4478607750 4479502700 4480397650 4481292600 4482187550 4483082500 4483977450 4484872400 4485767350 4486662300 4487557250 4488452200 4489347150 4490242100 4491137050 4492032000 4492926950 4493821900 4494716850 4495611800 4496506750 4497401700 4498296650 4499191600 4500086550 4500981500 4501876450 4502771400 4503666350 4504561300 4505456250 4506351200 4507246150 4508141100 4509036050 4509931000 4510825950 4511720900 4512615850 4513510800 4514405750 4515300700 4516195650 4517090600 4517985550 4518880500 4519775450 4520670400 4521565350 4522460300 4523355250 4524250200 4525145150 4526040100 4526935050 4487835000 4488739950 4489644900 4490549850 4491454800 4492359750 4493264700 4494169650 4495074600 4495979550 4496884500 4497789450 4498694400 4499599350 4500504300 4501409250 4502314200 4503219150 4504124100 4505029050 4505934000 4506838950 4507743900 4508648850 4509553800 4510458750 4511363700 4512268650 4513173600 4514078550 4514983500 4515888450 4516793400 4517698350 4518603300 4519508250 4520413200 4521318150 4522223100 4523128050 4524033000 4524937950 4525842900 4526747850 4527652800 4528557750 4529462700 4530367650 4531272600 4532177550 4533082500 4533987450 4534892400 4535797350 4536702300 4537607250 4538512200 4539417150 4540322100 4541227050 4542132000 4543036950 4543941900 4544846850 4545751800 4546656750 4547561700 4548466650 4549371600 4550276550 4551181500 4552086450 4552991400 4553896350 4554801300 4555706250 4556611200 
4557516150 4558421100 4559326050 4560231000 4561135950 4562040900 4562945850 4563850800 4564755750 4565660700 4566565650 4567470600 4568375550 4569280500 4570185450 4571090400 4571995350 4572900300 4573805250 4574710200 4575615150 4576520100 4577425050 4537335000 4538249950 4539164900 4540079850 4540994800 4541909750 4542824700 4543739650 4544654600 4545569550 4546484500 4547399450 4548314400 4549229350 4550144300 4551059250 4551974200 4552889150 4553804100 4554719050 4555634000 4556548950 4557463900 4558378850 4559293800 4560208750 4561123700 4562038650 4562953600 4563868550 4564783500 4565698450 4566613400 4567528350 4568443300 4569358250 4570273200 4571188150 4572103100 4573018050 4573933000 4574847950 4575762900 4576677850 4577592800 4578507750 4579422700 4580337650 4581252600 4582167550 4583082500 4583997450 4584912400 4585827350 4586742300 4587657250 4588572200 4589487150 4590402100 4591317050 4592232000 4593146950 4594061900 4594976850 4595891800 4596806750 4597721700 4598636650 4599551600 4600466550 4601381500 4602296450 4603211400 4604126350 4605041300 4605956250 4606871200 4607786150 4608701100 4609616050 4610531000 4611445950 4612360900 4613275850 4614190800 4615105750 4616020700 4616935650 4617850600 4618765550 4619680500 4620595450 4621510400 4622425350 4623340300 4624255250 4625170200 4626085150 4627000100 4627915050 4586835000 4587759950 4588684900 4589609850 4590534800 4591459750 4592384700 4593309650 4594234600 4595159550 4596084500 4597009450 4597934400 4598859350 4599784300 4600709250 4601634200 4602559150 4603484100 4604409050 4605334000 4606258950 4607183900 4608108850 4609033800 4609958750 4610883700 4611808650 4612733600 4613658550 4614583500 4615508450 4616433400 4617358350 4618283300 4619208250 4620133200 4621058150 4621983100 4622908050 4623833000 4624757950 4625682900 4626607850 4627532800 4628457750 4629382700 4630307650 4631232600 4632157550 4633082500 4634007450 4634932400 4635857350 4636782300 4637707250 4638632200 4639557150 4640482100 4641407050 4642332000 4643256950 4644181900 4645106850 4646031800 4646956750 4647881700 4648806650 4649731600 4650656550 4651581500 4652506450 4653431400 4654356350 4655281300 4656206250 4657131200 4658056150 4658981100 4659906050 4660831000 4661755950 4662680900 4663605850 4664530800 4665455750 4666380700 4667305650 4668230600 4669155550 4670080500 4671005450 4671930400 4672855350 4673780300 4674705250 4675630200 4676555150 4677480100 4678405050 4636335000 4637269950 4638204900 4639139850 4640074800 4641009750 4641944700 4642879650 4643814600 4644749550 4645684500 4646619450 4647554400 4648489350 4649424300 4650359250 4651294200 4652229150 4653164100 4654099050 4655034000 4655968950 4656903900 4657838850 4658773800 4659708750 4660643700 4661578650 4662513600 4663448550 4664383500 4665318450 4666253400 4667188350 4668123300 4669058250 4669993200 4670928150 4671863100 4672798050 4673733000 4674667950 4675602900 4676537850 4677472800 4678407750 4679342700 4680277650 4681212600 4682147550 4683082500 4684017450 4684952400 4685887350 4686822300 4687757250 4688692200 4689627150 4690562100 4691497050 4692432000 4693366950 4694301900 4695236850 4696171800 4697106750 4698041700 4698976650 4699911600 4700846550 4701781500 4702716450 4703651400 4704586350 4705521300 4706456250 4707391200 4708326150 4709261100 4710196050 4711131000 4712065950 4713000900 4713935850 4714870800 4715805750 4716740700 4717675650 4718610600 4719545550 4720480500 4721415450 4722350400 4723285350 4724220300 4725155250 4726090200 4727025150 4727960100 4728895050 
4685835000 4686779950 4687724900 4688669850 4689614800 4690559750 4691504700 4692449650 4693394600 4694339550 4695284500 4696229450 4697174400 4698119350 4699064300 4700009250 4700954200 4701899150 4702844100 4703789050 4704734000 4705678950 4706623900 4707568850 4708513800 4709458750 4710403700 4711348650 4712293600 4713238550 4714183500 4715128450 4716073400 4717018350 4717963300 4718908250 4719853200 4720798150 4721743100 4722688050 4723633000 4724577950 4725522900 4726467850 4727412800 4728357750 4729302700 4730247650 4731192600 4732137550 4733082500 4734027450 4734972400 4735917350 4736862300 4737807250 4738752200 4739697150 4740642100 4741587050 4742532000 4743476950 4744421900 4745366850 4746311800 4747256750 4748201700 4749146650 4750091600 4751036550 4751981500 4752926450 4753871400 4754816350 4755761300 4756706250 4757651200 4758596150 4759541100 4760486050 4761431000 4762375950 4763320900 4764265850 4765210800 4766155750 4767100700 4768045650 4768990600 4769935550 4770880500 4771825450 4772770400 4773715350 4774660300 4775605250 4776550200 4777495150 4778440100 4779385050 4735335000 4736289950 4737244900 4738199850 4739154800 4740109750 4741064700 4742019650 4742974600 4743929550 4744884500 4745839450 4746794400 4747749350 4748704300 4749659250 4750614200 4751569150 4752524100 4753479050 4754434000 4755388950 4756343900 4757298850 4758253800 4759208750 4760163700 4761118650 4762073600 4763028550 4763983500 4764938450 4765893400 4766848350 4767803300 4768758250 4769713200 4770668150 4771623100 4772578050 4773533000 4774487950 4775442900 4776397850 4777352800 4778307750 4779262700 4780217650 4781172600 4782127550 4783082500 4784037450 4784992400 4785947350 4786902300 4787857250 4788812200 4789767150 4790722100 4791677050 4792632000 4793586950 4794541900 4795496850 4796451800 4797406750 4798361700 4799316650 4800271600 4801226550 4802181500 4803136450 4804091400 4805046350 4806001300 4806956250 4807911200 4808866150 4809821100 4810776050 4811731000 4812685950 4813640900 4814595850 4815550800 4816505750 4817460700 4818415650 4819370600 4820325550 4821280500 4822235450 4823190400 4824145350 4825100300 4826055250 4827010200 4827965150 4828920100 4829875050 4784835000 4785799950 4786764900 4787729850 4788694800 4789659750 4790624700 4791589650 4792554600 4793519550 4794484500 4795449450 4796414400 4797379350 4798344300 4799309250 4800274200 4801239150 4802204100 4803169050 4804134000 4805098950 4806063900 4807028850 4807993800 4808958750 4809923700 4810888650 4811853600 4812818550 4813783500 4814748450 4815713400 4816678350 4817643300 4818608250 4819573200 4820538150 4821503100 4822468050 4823433000 4824397950 4825362900 4826327850 4827292800 4828257750 4829222700 4830187650 4831152600 4832117550 4833082500 4834047450 4835012400 4835977350 4836942300 4837907250 4838872200 4839837150 4840802100 4841767050 4842732000 4843696950 4844661900 4845626850 4846591800 4847556750 4848521700 4849486650 4850451600 4851416550 4852381500 4853346450 4854311400 4855276350 4856241300 4857206250 4858171200 4859136150 4860101100 4861066050 4862031000 4862995950 4863960900 4864925850 4865890800 4866855750 4867820700 4868785650 4869750600 4870715550 4871680500 4872645450 4873610400 4874575350 4875540300 4876505250 4877470200 4878435150 4879400100 4880365050 4834335000 4835309950 4836284900 4837259850 4838234800 4839209750 4840184700 4841159650 4842134600 4843109550 4844084500 4845059450 4846034400 4847009350 4847984300 4848959250 4849934200 4850909150 4851884100 4852859050 4853834000 4854808950 4855783900 
4856758850 4857733800 4858708750 4859683700 4860658650 4861633600 4862608550 4863583500 4864558450 4865533400 4866508350 4867483300 4868458250 4869433200 4870408150 4871383100 4872358050 4873333000 4874307950 4875282900 4876257850 4877232800 4878207750 4879182700 4880157650 4881132600 4882107550 4883082500 4884057450 4885032400 4886007350 4886982300 4887957250 4888932200 4889907150 4890882100 4891857050 4892832000 4893806950 4894781900 4895756850 4896731800 4897706750 4898681700 4899656650 4900631600 4901606550 4902581500 4903556450 4904531400 4905506350 4906481300 4907456250 4908431200 4909406150 4910381100 4911356050 4912331000 4913305950 4914280900 4915255850 4916230800 4917205750 4918180700 4919155650 4920130600 4921105550 4922080500 4923055450 4924030400 4925005350 4925980300 4926955250 4927930200 4928905150 4929880100 4930855050 4883835000 4884819950 4885804900 4886789850 4887774800 4888759750 4889744700 4890729650 4891714600 4892699550 4893684500 4894669450 4895654400 4896639350 4897624300 4898609250 4899594200 4900579150 4901564100 4902549050 4903534000 4904518950 4905503900 4906488850 4907473800 4908458750 4909443700 4910428650 4911413600 4912398550 4913383500 4914368450 4915353400 4916338350 4917323300 4918308250 4919293200 4920278150 4921263100 4922248050 4923233000 4924217950 4925202900 4926187850 4927172800 4928157750 4929142700 4930127650 4931112600 4932097550 4933082500 4934067450 4935052400 4936037350 4937022300 4938007250 4938992200 4939977150 4940962100 4941947050 4942932000 4943916950 4944901900 4945886850 4946871800 4947856750 4948841700 4949826650 4950811600 4951796550 4952781500 4953766450 4954751400 4955736350 4956721300 4957706250 4958691200 4959676150 4960661100 4961646050 4962631000 4963615950 4964600900 4965585850 4966570800 4967555750 4968540700 4969525650 4970510600 4971495550 4972480500 4973465450 4974450400 4975435350 4976420300 4977405250 4978390200 4979375150 4980360100 4981345050 4933335000 4934329950 4935324900 4936319850 4937314800 4938309750 4939304700 4940299650 4941294600 4942289550 4943284500 4944279450 4945274400 4946269350 4947264300 4948259250 4949254200 4950249150 4951244100 4952239050 4953234000 4954228950 4955223900 4956218850 4957213800 4958208750 4959203700 4960198650 4961193600 4962188550 4963183500 4964178450 4965173400 4966168350 4967163300 4968158250 4969153200 4970148150 4971143100 4972138050 4973133000 4974127950 4975122900 4976117850 4977112800 4978107750 4979102700 4980097650 4981092600 4982087550 4983082500 4984077450 4985072400 4986067350 4987062300 4988057250 4989052200 4990047150 4991042100 4992037050 4993032000 4994026950 4995021900 4996016850 4997011800 4998006750 4999001700 4999996650 5000991600 5001986550 5002981500 5003976450 5004971400 5005966350 5006961300 5007956250 5008951200 5009946150 5010941100 5011936050 5012931000 5013925950 5014920900 5015915850 5016910800 5017905750 5018900700 5019895650 5020890600 5021885550 5022880500 5023875450 5024870400 5025865350 5026860300 5027855250 5028850200 5029845150 5030840100 5031835050 ==13282== Profiling application: ./a.out ==13282== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 94.72% 2.5322ms 1 2.5322ms 2.5322ms 2.5322ms matproductsharedmemory(__int64*, __int64*, __int64*) 3.68% 98.338us 2 49.169us 49.025us 49.313us [CUDA memcpy HtoD] 1.61% 42.913us 1 42.913us 42.913us 42.913us [CUDA memcpy DtoH] API calls: 98.22% 189.54ms 3 63.178ms 5.3290us 189.52ms hipMalloc 1.43% 2.7661ms 3 922.02us 26.698us 2.6712ms hipMemcpy 0.19% 361.76us 94 3.8480us 170ns 
233.68us hipDeviceGetAttribute 0.08% 150.22us 3 50.073us 6.2080us 110.67us hipFree 0.05% 89.941us 1 89.941us 89.941us 89.941us cuDeviceTotalMem 0.01% 27.216us 1 27.216us 27.216us 27.216us hipDeviceGetName 0.01% 24.939us 1 24.939us 24.939us 24.939us hipLaunch 0.00% 2.2690us 3 756ns 186ns 1.7650us hipGetDeviceCount 0.00% 1.0820us 2 541ns 239ns 843ns hipDeviceGet 0.00% 955ns 3 318ns 172ns 542ns hipSetupArgument 0.00% 724ns 1 724ns 724ns 724ns hipConfigureCall */
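The nvprof summary above attributes about 95% of GPU time to the matproductsharedmemory kernel, with the two host-to-device copies and one device-to-host copy accounting for the rest. As an illustration only (not part of either file), a kernel launch can also be timed in-process with CUDA events; the sketch below assumes the kernel signature and the grid/col1 launch configuration of the source that follows (the HIP analogues hipEventCreate/hipEventRecord/hipEventElapsedTime map one-to-one).

/* Hypothetical helper (illustration only): time one launch of the
   shared-memory kernel with CUDA events instead of an external profiler. */
#include <cuda_runtime.h>

float time_matproduct(dim3 grid, int block, long long *d, long long *e, long long *f)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    matproductsharedmemory<<<grid, block>>>(d, e, f);   /* kernel from the .cu below */
    cudaEventRecord(stop);

    cudaEventSynchronize(stop);          /* wait for the launch to finish */
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;                           /* elapsed kernel time in milliseconds */
}

Called in place of the bare launch, this reports the same quantity that the profile attributes to the kernel line above.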
24755245ab34874536920df8fad957c4c3af017a.cu
#include<stdio.h>
#include<cuda.h>

#define row1 10 /* Number of rows of first matrix */
#define col1 10 /* Number of columns of first matrix */
#define row2 10 /* Number of rows of second matrix */
#define col2 10 /* Number of columns of second matrix */

typedef long long int LLI;

__global__ void matproductsharedmemory(LLI *l, LLI *m, LLI *n)
{
    /* Each block computes one element n[y][x] of the product;
       the col1 threads of the block each contribute one partial product. */
    LLI x = blockIdx.x;
    LLI y = blockIdx.y;

    __shared__ LLI p[col1];

    LLI i;
    LLI k = threadIdx.x;

    n[col2*y + x] = 0;
    p[k] = l[col1*y + k] * m[col2*k + x];   /* partial product for index k */

    __syncthreads();                        /* wait until all partials are in shared memory */

    for (i = 0; i < col1; i++)
        n[col2*y + x] = n[col2*y + x] + p[i];
}

int main()
{
    LLI a[row1][col1];
    LLI b[row2][col2];
    LLI c[row1][col2];
    LLI *d, *e, *f;
    LLI i, j;

    /* Fill the first matrix: a[i][j] = i*row1 + j */
    for (i = 0; i < row1; i++) {
        for (j = 0; j < col1; j++) {
            a[i][j] = i*row1 + j;
        }
    }

    /* Fill the second matrix: b[i][j] = i*row2 + j */
    for (i = 0; i < row2; i++) {
        for (j = 0; j < col2; j++) {
            b[i][j] = i*row2 + j;
        }
    }

    cudaMalloc((void **)&d, row1*col1*sizeof(LLI));
    cudaMalloc((void **)&e, row2*col2*sizeof(LLI));
    cudaMalloc((void **)&f, row1*col2*sizeof(LLI));

    cudaMemcpy(d, a, row1*col1*sizeof(LLI), cudaMemcpyHostToDevice);
    cudaMemcpy(e, b, row2*col2*sizeof(LLI), cudaMemcpyHostToDevice);

    /* Two-dimensional grid (collection of blocks):
       dim3 grid(number of columns, number of rows) */
    dim3 grid(col2, row1);

    matproductsharedmemory<<<grid, col1>>>(d, e, f);

    cudaMemcpy(c, f, row1*col2*sizeof(LLI), cudaMemcpyDeviceToHost);

    printf("\n Product of two matrices:\n ");
    for (i = 0; i < row1; i++) {
        for (j = 0; j < col2; j++) {
            printf("%lld\t", c[i][j]);   /* %lld matches the long long element type */
        }
        printf("\n");
    }

    cudaFree(d);
    cudaFree(e);
    cudaFree(f);

    return 0;
}

/* OUTPUT profile
==13282== NVPROF is profiling process 13282, command: ./a.out

 Product of two matrices:
 ... (product matrix output rows omitted) ...
*/
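Not part of the original source: a minimal host-side verification sketch, assuming the same row1/col1/row2/col2 macros and LLI typedef defined above. It recomputes the product on the CPU and compares it element by element with the array copied back from the GPU.

/* Hypothetical helper (illustration only, not in the original .cu):
   recompute the product on the host and compare with the GPU result. */
static int check_result(LLI a[row1][col1], LLI b[row2][col2], LLI c[row1][col2])
{
    for (LLI i = 0; i < row1; i++) {
        for (LLI j = 0; j < col2; j++) {
            LLI ref = 0;
            for (LLI k = 0; k < col1; k++)
                ref += a[i][k] * b[k][j];   /* straightforward triple loop */
            if (ref != c[i][j])
                return 0;                   /* mismatch at (i, j) */
        }
    }
    return 1;                               /* every element matches */
}

Calling check_result(a, b, c) right after the device-to-host copy would confirm that the shared-memory kernel reproduces the CPU reference before the values are printed.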
828793800 828958750 829123700 829288650 829453600 829618550 829783500 829948450 830113400 830278350 830443300 830608250 830773200 830938150 831103100 831268050 831433000 831597950 831762900 831927850 832092800 832257750 832422700 832587650 832752600 832917550 833082500 833247450 833412400 833577350 833742300 833907250 834072200 834237150 834402100 834567050 834732000 834896950 835061900 835226850 835391800 835556750 835721700 835886650 836051600 836216550 836381500 836546450 836711400 836876350 837041300 837206250 837371200 837536150 837701100 837866050 838031000 838195950 838360900 838525850 838690800 838855750 839020700 839185650 839350600 839515550 839680500 839845450 840010400 840175350 840340300 840505250 840670200 840835150 841000100 841165050 874335000 874509950 874684900 874859850 875034800 875209750 875384700 875559650 875734600 875909550 876084500 876259450 876434400 876609350 876784300 876959250 877134200 877309150 877484100 877659050 877834000 878008950 878183900 878358850 878533800 878708750 878883700 879058650 879233600 879408550 879583500 879758450 879933400 880108350 880283300 880458250 880633200 880808150 880983100 881158050 881333000 881507950 881682900 881857850 882032800 882207750 882382700 882557650 882732600 882907550 883082500 883257450 883432400 883607350 883782300 883957250 884132200 884307150 884482100 884657050 884832000 885006950 885181900 885356850 885531800 885706750 885881700 886056650 886231600 886406550 886581500 886756450 886931400 887106350 887281300 887456250 887631200 887806150 887981100 888156050 888331000 888505950 888680900 888855850 889030800 889205750 889380700 889555650 889730600 889905550 890080500 890255450 890430400 890605350 890780300 890955250 891130200 891305150 891480100 891655050 923835000 924019950 924204900 924389850 924574800 924759750 924944700 925129650 925314600 925499550 925684500 925869450 926054400 926239350 926424300 926609250 926794200 926979150 927164100 927349050 927534000 927718950 927903900 928088850 928273800 928458750 928643700 928828650 929013600 929198550 929383500 929568450 929753400 929938350 930123300 930308250 930493200 930678150 930863100 931048050 931233000 931417950 931602900 931787850 931972800 932157750 932342700 932527650 932712600 932897550 933082500 933267450 933452400 933637350 933822300 934007250 934192200 934377150 934562100 934747050 934932000 935116950 935301900 935486850 935671800 935856750 936041700 936226650 936411600 936596550 936781500 936966450 937151400 937336350 937521300 937706250 937891200 938076150 938261100 938446050 938631000 938815950 939000900 939185850 939370800 939555750 939740700 939925650 940110600 940295550 940480500 940665450 940850400 941035350 941220300 941405250 941590200 941775150 941960100 942145050 973335000 973529950 973724900 973919850 974114800 974309750 974504700 974699650 974894600 975089550 975284500 975479450 975674400 975869350 976064300 976259250 976454200 976649150 976844100 977039050 977234000 977428950 977623900 977818850 978013800 978208750 978403700 978598650 978793600 978988550 979183500 979378450 979573400 979768350 979963300 980158250 980353200 980548150 980743100 980938050 981133000 981327950 981522900 981717850 981912800 982107750 982302700 982497650 982692600 982887550 983082500 983277450 983472400 983667350 983862300 984057250 984252200 984447150 984642100 984837050 985032000 985226950 985421900 985616850 985811800 986006750 986201700 986396650 986591600 986786550 986981500 987176450 987371400 987566350 987761300 987956250 988151200 988346150 988541100 
988736050 988931000 989125950 989320900 989515850 989710800 989905750 990100700 990295650 990490600 990685550 990880500 991075450 991270400 991465350 991660300 991855250 992050200 992245150 992440100 992635050 1022835000 1023039950 1023244900 1023449850 1023654800 1023859750 1024064700 1024269650 1024474600 1024679550 1024884500 1025089450 1025294400 1025499350 1025704300 1025909250 1026114200 1026319150 1026524100 1026729050 1026934000 1027138950 1027343900 1027548850 1027753800 1027958750 1028163700 1028368650 1028573600 1028778550 1028983500 1029188450 1029393400 1029598350 1029803300 1030008250 1030213200 1030418150 1030623100 1030828050 1031033000 1031237950 1031442900 1031647850 1031852800 1032057750 1032262700 1032467650 1032672600 1032877550 1033082500 1033287450 1033492400 1033697350 1033902300 1034107250 1034312200 1034517150 1034722100 1034927050 1035132000 1035336950 1035541900 1035746850 1035951800 1036156750 1036361700 1036566650 1036771600 1036976550 1037181500 1037386450 1037591400 1037796350 1038001300 1038206250 1038411200 1038616150 1038821100 1039026050 1039231000 1039435950 1039640900 1039845850 1040050800 1040255750 1040460700 1040665650 1040870600 1041075550 1041280500 1041485450 1041690400 1041895350 1042100300 1042305250 1042510200 1042715150 1042920100 1043125050 1072335000 1072549950 1072764900 1072979850 1073194800 1073409750 1073624700 1073839650 1074054600 1074269550 1074484500 1074699450 1074914400 1075129350 1075344300 1075559250 1075774200 1075989150 1076204100 1076419050 1076634000 1076848950 1077063900 1077278850 1077493800 1077708750 1077923700 1078138650 1078353600 1078568550 1078783500 1078998450 1079213400 1079428350 1079643300 1079858250 1080073200 1080288150 1080503100 1080718050 1080933000 1081147950 1081362900 1081577850 1081792800 1082007750 1082222700 1082437650 1082652600 1082867550 1083082500 1083297450 1083512400 1083727350 1083942300 1084157250 1084372200 1084587150 1084802100 1085017050 1085232000 1085446950 1085661900 1085876850 1086091800 1086306750 1086521700 1086736650 1086951600 1087166550 1087381500 1087596450 1087811400 1088026350 1088241300 1088456250 1088671200 1088886150 1089101100 1089316050 1089531000 1089745950 1089960900 1090175850 1090390800 1090605750 1090820700 1091035650 1091250600 1091465550 1091680500 1091895450 1092110400 1092325350 1092540300 1092755250 1092970200 1093185150 1093400100 1093615050 1121835000 1122059950 1122284900 1122509850 1122734800 1122959750 1123184700 1123409650 1123634600 1123859550 1124084500 1124309450 1124534400 1124759350 1124984300 1125209250 1125434200 1125659150 1125884100 1126109050 1126334000 1126558950 1126783900 1127008850 1127233800 1127458750 1127683700 1127908650 1128133600 1128358550 1128583500 1128808450 1129033400 1129258350 1129483300 1129708250 1129933200 1130158150 1130383100 1130608050 1130833000 1131057950 1131282900 1131507850 1131732800 1131957750 1132182700 1132407650 1132632600 1132857550 1133082500 1133307450 1133532400 1133757350 1133982300 1134207250 1134432200 1134657150 1134882100 1135107050 1135332000 1135556950 1135781900 1136006850 1136231800 1136456750 1136681700 1136906650 1137131600 1137356550 1137581500 1137806450 1138031400 1138256350 1138481300 1138706250 1138931200 1139156150 1139381100 1139606050 1139831000 1140055950 1140280900 1140505850 1140730800 1140955750 1141180700 1141405650 1141630600 1141855550 1142080500 1142305450 1142530400 1142755350 1142980300 1143205250 1143430200 1143655150 1143880100 1144105050 1171335000 1171569950 1171804900 1172039850 
1172274800 1172509750 1172744700 1172979650 1173214600 1173449550 1173684500 1173919450 1174154400 1174389350 1174624300 1174859250 1175094200 1175329150 1175564100 1175799050 1176034000 1176268950 1176503900 1176738850 1176973800 1177208750 1177443700 1177678650 1177913600 1178148550 1178383500 1178618450 1178853400 1179088350 1179323300 1179558250 1179793200 1180028150 1180263100 1180498050 1180733000 1180967950 1181202900 1181437850 1181672800 1181907750 1182142700 1182377650 1182612600 1182847550 1183082500 1183317450 1183552400 1183787350 1184022300 1184257250 1184492200 1184727150 1184962100 1185197050 1185432000 1185666950 1185901900 1186136850 1186371800 1186606750 1186841700 1187076650 1187311600 1187546550 1187781500 1188016450 1188251400 1188486350 1188721300 1188956250 1189191200 1189426150 1189661100 1189896050 1190131000 1190365950 1190600900 1190835850 1191070800 1191305750 1191540700 1191775650 1192010600 1192245550 1192480500 1192715450 1192950400 1193185350 1193420300 1193655250 1193890200 1194125150 1194360100 1194595050 1220835000 1221079950 1221324900 1221569850 1221814800 1222059750 1222304700 1222549650 1222794600 1223039550 1223284500 1223529450 1223774400 1224019350 1224264300 1224509250 1224754200 1224999150 1225244100 1225489050 1225734000 1225978950 1226223900 1226468850 1226713800 1226958750 1227203700 1227448650 1227693600 1227938550 1228183500 1228428450 1228673400 1228918350 1229163300 1229408250 1229653200 1229898150 1230143100 1230388050 1230633000 1230877950 1231122900 1231367850 1231612800 1231857750 1232102700 1232347650 1232592600 1232837550 1233082500 1233327450 1233572400 1233817350 1234062300 1234307250 1234552200 1234797150 1235042100 1235287050 1235532000 1235776950 1236021900 1236266850 1236511800 1236756750 1237001700 1237246650 1237491600 1237736550 1237981500 1238226450 1238471400 1238716350 1238961300 1239206250 1239451200 1239696150 1239941100 1240186050 1240431000 1240675950 1240920900 1241165850 1241410800 1241655750 1241900700 1242145650 1242390600 1242635550 1242880500 1243125450 1243370400 1243615350 1243860300 1244105250 1244350200 1244595150 1244840100 1245085050 1270335000 1270589950 1270844900 1271099850 1271354800 1271609750 1271864700 1272119650 1272374600 1272629550 1272884500 1273139450 1273394400 1273649350 1273904300 1274159250 1274414200 1274669150 1274924100 1275179050 1275434000 1275688950 1275943900 1276198850 1276453800 1276708750 1276963700 1277218650 1277473600 1277728550 1277983500 1278238450 1278493400 1278748350 1279003300 1279258250 1279513200 1279768150 1280023100 1280278050 1280533000 1280787950 1281042900 1281297850 1281552800 1281807750 1282062700 1282317650 1282572600 1282827550 1283082500 1283337450 1283592400 1283847350 1284102300 1284357250 1284612200 1284867150 1285122100 1285377050 1285632000 1285886950 1286141900 1286396850 1286651800 1286906750 1287161700 1287416650 1287671600 1287926550 1288181500 1288436450 1288691400 1288946350 1289201300 1289456250 1289711200 1289966150 1290221100 1290476050 1290731000 1290985950 1291240900 1291495850 1291750800 1292005750 1292260700 1292515650 1292770600 1293025550 1293280500 1293535450 1293790400 1294045350 1294300300 1294555250 1294810200 1295065150 1295320100 1295575050 1319835000 1320099950 1320364900 1320629850 1320894800 1321159750 1321424700 1321689650 1321954600 1322219550 1322484500 1322749450 1323014400 1323279350 1323544300 1323809250 1324074200 1324339150 1324604100 1324869050 1325134000 1325398950 1325663900 1325928850 1326193800 1326458750 1326723700 
1326988650 1327253600 1327518550 1327783500 1328048450 1328313400 1328578350 1328843300 1329108250 1329373200 1329638150 1329903100 1330168050 1330433000 1330697950 1330962900 1331227850 1331492800 1331757750 1332022700 1332287650 1332552600 1332817550 1333082500 1333347450 1333612400 1333877350 1334142300 1334407250 1334672200 1334937150 1335202100 1335467050 1335732000 1335996950 1336261900 1336526850 1336791800 1337056750 1337321700 1337586650 1337851600 1338116550 1338381500 1338646450 1338911400 1339176350 1339441300 1339706250 1339971200 1340236150 1340501100 1340766050 1341031000 1341295950 1341560900 1341825850 1342090800 1342355750 1342620700 1342885650 1343150600 1343415550 1343680500 1343945450 1344210400 1344475350 1344740300 1345005250 1345270200 1345535150 1345800100 1346065050 1369335000 1369609950 1369884900 1370159850 1370434800 1370709750 1370984700 1371259650 1371534600 1371809550 1372084500 1372359450 1372634400 1372909350 1373184300 1373459250 1373734200 1374009150 1374284100 1374559050 1374834000 1375108950 1375383900 1375658850 1375933800 1376208750 1376483700 1376758650 1377033600 1377308550 1377583500 1377858450 1378133400 1378408350 1378683300 1378958250 1379233200 1379508150 1379783100 1380058050 1380333000 1380607950 1380882900 1381157850 1381432800 1381707750 1381982700 1382257650 1382532600 1382807550 1383082500 1383357450 1383632400 1383907350 1384182300 1384457250 1384732200 1385007150 1385282100 1385557050 1385832000 1386106950 1386381900 1386656850 1386931800 1387206750 1387481700 1387756650 1388031600 1388306550 1388581500 1388856450 1389131400 1389406350 1389681300 1389956250 1390231200 1390506150 1390781100 1391056050 1391331000 1391605950 1391880900 1392155850 1392430800 1392705750 1392980700 1393255650 1393530600 1393805550 1394080500 1394355450 1394630400 1394905350 1395180300 1395455250 1395730200 1396005150 1396280100 1396555050 1418835000 1419119950 1419404900 1419689850 1419974800 1420259750 1420544700 1420829650 1421114600 1421399550 1421684500 1421969450 1422254400 1422539350 1422824300 1423109250 1423394200 1423679150 1423964100 1424249050 1424534000 1424818950 1425103900 1425388850 1425673800 1425958750 1426243700 1426528650 1426813600 1427098550 1427383500 1427668450 1427953400 1428238350 1428523300 1428808250 1429093200 1429378150 1429663100 1429948050 1430233000 1430517950 1430802900 1431087850 1431372800 1431657750 1431942700 1432227650 1432512600 1432797550 1433082500 1433367450 1433652400 1433937350 1434222300 1434507250 1434792200 1435077150 1435362100 1435647050 1435932000 1436216950 1436501900 1436786850 1437071800 1437356750 1437641700 1437926650 1438211600 1438496550 1438781500 1439066450 1439351400 1439636350 1439921300 1440206250 1440491200 1440776150 1441061100 1441346050 1441631000 1441915950 1442200900 1442485850 1442770800 1443055750 1443340700 1443625650 1443910600 1444195550 1444480500 1444765450 1445050400 1445335350 1445620300 1445905250 1446190200 1446475150 1446760100 1447045050 1468335000 1468629950 1468924900 1469219850 1469514800 1469809750 1470104700 1470399650 1470694600 1470989550 1471284500 1471579450 1471874400 1472169350 1472464300 1472759250 1473054200 1473349150 1473644100 1473939050 1474234000 1474528950 1474823900 1475118850 1475413800 1475708750 1476003700 1476298650 1476593600 1476888550 1477183500 1477478450 1477773400 1478068350 1478363300 1478658250 1478953200 1479248150 1479543100 1479838050 1480133000 1480427950 1480722900 1481017850 1481312800 1481607750 1481902700 1482197650 1482492600 1482787550 
1483082500 1483377450 1483672400 1483967350 1484262300 1484557250 1484852200 1485147150 1485442100 1485737050 1486032000 1486326950 1486621900 1486916850 1487211800 1487506750 1487801700 1488096650 1488391600 1488686550 1488981500 1489276450 1489571400 1489866350 1490161300 1490456250 1490751200 1491046150 1491341100 1491636050 1491931000 1492225950 1492520900 1492815850 1493110800 1493405750 1493700700 1493995650 1494290600 1494585550 1494880500 1495175450 1495470400 1495765350 1496060300 1496355250 1496650200 1496945150 1497240100 1497535050 1517835000 1518139950 1518444900 1518749850 1519054800 1519359750 1519664700 1519969650 1520274600 1520579550 1520884500 1521189450 1521494400 1521799350 1522104300 1522409250 1522714200 1523019150 1523324100 1523629050 1523934000 1524238950 1524543900 1524848850 1525153800 1525458750 1525763700 1526068650 1526373600 1526678550 1526983500 1527288450 1527593400 1527898350 1528203300 1528508250 1528813200 1529118150 1529423100 1529728050 1530033000 1530337950 1530642900 1530947850 1531252800 1531557750 1531862700 1532167650 1532472600 1532777550 1533082500 1533387450 1533692400 1533997350 1534302300 1534607250 1534912200 1535217150 1535522100 1535827050 1536132000 1536436950 1536741900 1537046850 1537351800 1537656750 1537961700 1538266650 1538571600 1538876550 1539181500 1539486450 1539791400 1540096350 1540401300 1540706250 1541011200 1541316150 1541621100 1541926050 1542231000 1542535950 1542840900 1543145850 1543450800 1543755750 1544060700 1544365650 1544670600 1544975550 1545280500 1545585450 1545890400 1546195350 1546500300 1546805250 1547110200 1547415150 1547720100 1548025050 1567335000 1567649950 1567964900 1568279850 1568594800 1568909750 1569224700 1569539650 1569854600 1570169550 1570484500 1570799450 1571114400 1571429350 1571744300 1572059250 1572374200 1572689150 1573004100 1573319050 1573634000 1573948950 1574263900 1574578850 1574893800 1575208750 1575523700 1575838650 1576153600 1576468550 1576783500 1577098450 1577413400 1577728350 1578043300 1578358250 1578673200 1578988150 1579303100 1579618050 1579933000 1580247950 1580562900 1580877850 1581192800 1581507750 1581822700 1582137650 1582452600 1582767550 1583082500 1583397450 1583712400 1584027350 1584342300 1584657250 1584972200 1585287150 1585602100 1585917050 1586232000 1586546950 1586861900 1587176850 1587491800 1587806750 1588121700 1588436650 1588751600 1589066550 1589381500 1589696450 1590011400 1590326350 1590641300 1590956250 1591271200 1591586150 1591901100 1592216050 1592531000 1592845950 1593160900 1593475850 1593790800 1594105750 1594420700 1594735650 1595050600 1595365550 1595680500 1595995450 1596310400 1596625350 1596940300 1597255250 1597570200 1597885150 1598200100 1598515050 1616835000 1617159950 1617484900 1617809850 1618134800 1618459750 1618784700 1619109650 1619434600 1619759550 1620084500 1620409450 1620734400 1621059350 1621384300 1621709250 1622034200 1622359150 1622684100 1623009050 1623334000 1623658950 1623983900 1624308850 1624633800 1624958750 1625283700 1625608650 1625933600 1626258550 1626583500 1626908450 1627233400 1627558350 1627883300 1628208250 1628533200 1628858150 1629183100 1629508050 1629833000 1630157950 1630482900 1630807850 1631132800 1631457750 1631782700 1632107650 1632432600 1632757550 1633082500 1633407450 1633732400 1634057350 1634382300 1634707250 1635032200 1635357150 1635682100 1636007050 1636332000 1636656950 1636981900 1637306850 1637631800 1637956750 1638281700 1638606650 1638931600 1639256550 1639581500 1639906450 1640231400 
1640556350 1640881300 1641206250 1641531200 1641856150 1642181100 1642506050 1642831000 1643155950 1643480900 1643805850 1644130800 1644455750 1644780700 1645105650 1645430600 1645755550 1646080500 1646405450 1646730400 1647055350 1647380300 1647705250 1648030200 1648355150 1648680100 1649005050 1666335000 1666669950 1667004900 1667339850 1667674800 1668009750 1668344700 1668679650 1669014600 1669349550 1669684500 1670019450 1670354400 1670689350 1671024300 1671359250 1671694200 1672029150 1672364100 1672699050 1673034000 1673368950 1673703900 1674038850 1674373800 1674708750 1675043700 1675378650 1675713600 1676048550 1676383500 1676718450 1677053400 1677388350 1677723300 1678058250 1678393200 1678728150 1679063100 1679398050 1679733000 1680067950 1680402900 1680737850 1681072800 1681407750 1681742700 1682077650 1682412600 1682747550 1683082500 1683417450 1683752400 1684087350 1684422300 1684757250 1685092200 1685427150 1685762100 1686097050 1686432000 1686766950 1687101900 1687436850 1687771800 1688106750 1688441700 1688776650 1689111600 1689446550 1689781500 1690116450 1690451400 1690786350 1691121300 1691456250 1691791200 1692126150 1692461100 1692796050 1693131000 1693465950 1693800900 1694135850 1694470800 1694805750 1695140700 1695475650 1695810600 1696145550 1696480500 1696815450 1697150400 1697485350 1697820300 1698155250 1698490200 1698825150 1699160100 1699495050 1715835000 1716179950 1716524900 1716869850 1717214800 1717559750 1717904700 1718249650 1718594600 1718939550 1719284500 1719629450 1719974400 1720319350 1720664300 1721009250 1721354200 1721699150 1722044100 1722389050 1722734000 1723078950 1723423900 1723768850 1724113800 1724458750 1724803700 1725148650 1725493600 1725838550 1726183500 1726528450 1726873400 1727218350 1727563300 1727908250 1728253200 1728598150 1728943100 1729288050 1729633000 1729977950 1730322900 1730667850 1731012800 1731357750 1731702700 1732047650 1732392600 1732737550 1733082500 1733427450 1733772400 1734117350 1734462300 1734807250 1735152200 1735497150 1735842100 1736187050 1736532000 1736876950 1737221900 1737566850 1737911800 1738256750 1738601700 1738946650 1739291600 1739636550 1739981500 1740326450 1740671400 1741016350 1741361300 1741706250 1742051200 1742396150 1742741100 1743086050 1743431000 1743775950 1744120900 1744465850 1744810800 1745155750 1745500700 1745845650 1746190600 1746535550 1746880500 1747225450 1747570400 1747915350 1748260300 1748605250 1748950200 1749295150 1749640100 1749985050 1765335000 1765689950 1766044900 1766399850 1766754800 1767109750 1767464700 1767819650 1768174600 1768529550 1768884500 1769239450 1769594400 1769949350 1770304300 1770659250 1771014200 1771369150 1771724100 1772079050 1772434000 1772788950 1773143900 1773498850 1773853800 1774208750 1774563700 1774918650 1775273600 1775628550 1775983500 1776338450 1776693400 1777048350 1777403300 1777758250 1778113200 1778468150 1778823100 1779178050 1779533000 1779887950 1780242900 1780597850 1780952800 1781307750 1781662700 1782017650 1782372600 1782727550 1783082500 1783437450 1783792400 1784147350 1784502300 1784857250 1785212200 1785567150 1785922100 1786277050 1786632000 1786986950 1787341900 1787696850 1788051800 1788406750 1788761700 1789116650 1789471600 1789826550 1790181500 1790536450 1790891400 1791246350 1791601300 1791956250 1792311200 1792666150 1793021100 1793376050 1793731000 1794085950 1794440900 1794795850 1795150800 1795505750 1795860700 1796215650 1796570600 1796925550 1797280500 1797635450 1797990400 1798345350 1798700300 1799055250 
1799410200 1799765150 1800120100 1800475050 1814835000 1815199950 1815564900 1815929850 1816294800 1816659750 1817024700 1817389650 1817754600 1818119550 1818484500 1818849450 1819214400 1819579350 1819944300 1820309250 1820674200 1821039150 1821404100 1821769050 1822134000 1822498950 1822863900 1823228850 1823593800 1823958750 1824323700 1824688650 1825053600 1825418550 1825783500 1826148450 1826513400 1826878350 1827243300 1827608250 1827973200 1828338150 1828703100 1829068050 1829433000 1829797950 1830162900 1830527850 1830892800 1831257750 1831622700 1831987650 1832352600 1832717550 1833082500 1833447450 1833812400 1834177350 1834542300 1834907250 1835272200 1835637150 1836002100 1836367050 1836732000 1837096950 1837461900 1837826850 1838191800 1838556750 1838921700 1839286650 1839651600 1840016550 1840381500 1840746450 1841111400 1841476350 1841841300 1842206250 1842571200 1842936150 1843301100 1843666050 1844031000 1844395950 1844760900 1845125850 1845490800 1845855750 1846220700 1846585650 1846950600 1847315550 1847680500 1848045450 1848410400 1848775350 1849140300 1849505250 1849870200 1850235150 1850600100 1850965050 1864335000 1864709950 1865084900 1865459850 1865834800 1866209750 1866584700 1866959650 1867334600 1867709550 1868084500 1868459450 1868834400 1869209350 1869584300 1869959250 1870334200 1870709150 1871084100 1871459050 1871834000 1872208950 1872583900 1872958850 1873333800 1873708750 1874083700 1874458650 1874833600 1875208550 1875583500 1875958450 1876333400 1876708350 1877083300 1877458250 1877833200 1878208150 1878583100 1878958050 1879333000 1879707950 1880082900 1880457850 1880832800 1881207750 1881582700 1881957650 1882332600 1882707550 1883082500 1883457450 1883832400 1884207350 1884582300 1884957250 1885332200 1885707150 1886082100 1886457050 1886832000 1887206950 1887581900 1887956850 1888331800 1888706750 1889081700 1889456650 1889831600 1890206550 1890581500 1890956450 1891331400 1891706350 1892081300 1892456250 1892831200 1893206150 1893581100 1893956050 1894331000 1894705950 1895080900 1895455850 1895830800 1896205750 1896580700 1896955650 1897330600 1897705550 1898080500 1898455450 1898830400 1899205350 1899580300 1899955250 1900330200 1900705150 1901080100 1901455050 1913835000 1914219950 1914604900 1914989850 1915374800 1915759750 1916144700 1916529650 1916914600 1917299550 1917684500 1918069450 1918454400 1918839350 1919224300 1919609250 1919994200 1920379150 1920764100 1921149050 1921534000 1921918950 1922303900 1922688850 1923073800 1923458750 1923843700 1924228650 1924613600 1924998550 1925383500 1925768450 1926153400 1926538350 1926923300 1927308250 1927693200 1928078150 1928463100 1928848050 1929233000 1929617950 1930002900 1930387850 1930772800 1931157750 1931542700 1931927650 1932312600 1932697550 1933082500 1933467450 1933852400 1934237350 1934622300 1935007250 1935392200 1935777150 1936162100 1936547050 1936932000 1937316950 1937701900 1938086850 1938471800 1938856750 1939241700 1939626650 1940011600 1940396550 1940781500 1941166450 1941551400 1941936350 1942321300 1942706250 1943091200 1943476150 1943861100 1944246050 1944631000 1945015950 1945400900 1945785850 1946170800 1946555750 1946940700 1947325650 1947710600 1948095550 1948480500 1948865450 1949250400 1949635350 1950020300 1950405250 1950790200 1951175150 1951560100 1951945050 1963335000 1963729950 1964124900 1964519850 1964914800 1965309750 1965704700 1966099650 1966494600 1966889550 1967284500 1967679450 1968074400 1968469350 1968864300 1969259250 1969654200 1970049150 1970444100 
1970839050 1971234000 1971628950 1972023900 1972418850 1972813800 1973208750 1973603700 1973998650 1974393600 1974788550 1975183500 1975578450 1975973400 1976368350 1976763300 1977158250 1977553200 1977948150 1978343100 1978738050 1979133000 1979527950 1979922900 1980317850 1980712800 1981107750 1981502700 1981897650 1982292600 1982687550 1983082500 1983477450 1983872400 1984267350 1984662300 1985057250 1985452200 1985847150 1986242100 1986637050 1987032000 1987426950 1987821900 1988216850 1988611800 1989006750 1989401700 1989796650 1990191600 1990586550 1990981500 1991376450 1991771400 1992166350 1992561300 1992956250 1993351200 1993746150 1994141100 1994536050 1994931000 1995325950 1995720900 1996115850 1996510800 1996905750 1997300700 1997695650 1998090600 1998485550 1998880500 1999275450 1999670400 2000065350 2000460300 2000855250 2001250200 2001645150 2002040100 2002435050 2012835000 2013239950 2013644900 2014049850 2014454800 2014859750 2015264700 2015669650 2016074600 2016479550 2016884500 2017289450 2017694400 2018099350 2018504300 2018909250 2019314200 2019719150 2020124100 2020529050 2020934000 2021338950 2021743900 2022148850 2022553800 2022958750 2023363700 2023768650 2024173600 2024578550 2024983500 2025388450 2025793400 2026198350 2026603300 2027008250 2027413200 2027818150 2028223100 2028628050 2029033000 2029437950 2029842900 2030247850 2030652800 2031057750 2031462700 2031867650 2032272600 2032677550 2033082500 2033487450 2033892400 2034297350 2034702300 2035107250 2035512200 2035917150 2036322100 2036727050 2037132000 2037536950 2037941900 2038346850 2038751800 2039156750 2039561700 2039966650 2040371600 2040776550 2041181500 2041586450 2041991400 2042396350 2042801300 2043206250 2043611200 2044016150 2044421100 2044826050 2045231000 2045635950 2046040900 2046445850 2046850800 2047255750 2047660700 2048065650 2048470600 2048875550 2049280500 2049685450 2050090400 2050495350 2050900300 2051305250 2051710200 2052115150 2052520100 2052925050 2062335000 2062749950 2063164900 2063579850 2063994800 2064409750 2064824700 2065239650 2065654600 2066069550 2066484500 2066899450 2067314400 2067729350 2068144300 2068559250 2068974200 2069389150 2069804100 2070219050 2070634000 2071048950 2071463900 2071878850 2072293800 2072708750 2073123700 2073538650 2073953600 2074368550 2074783500 2075198450 2075613400 2076028350 2076443300 2076858250 2077273200 2077688150 2078103100 2078518050 2078933000 2079347950 2079762900 2080177850 2080592800 2081007750 2081422700 2081837650 2082252600 2082667550 2083082500 2083497450 2083912400 2084327350 2084742300 2085157250 2085572200 2085987150 2086402100 2086817050 2087232000 2087646950 2088061900 2088476850 2088891800 2089306750 2089721700 2090136650 2090551600 2090966550 2091381500 2091796450 2092211400 2092626350 2093041300 2093456250 2093871200 2094286150 2094701100 2095116050 2095531000 2095945950 2096360900 2096775850 2097190800 2097605750 2098020700 2098435650 2098850600 2099265550 2099680500 2100095450 2100510400 2100925350 2101340300 2101755250 2102170200 2102585150 2103000100 2103415050 2111835000 2112259950 2112684900 2113109850 2113534800 2113959750 2114384700 2114809650 2115234600 2115659550 2116084500 2116509450 2116934400 2117359350 2117784300 2118209250 2118634200 2119059150 2119484100 2119909050 2120334000 2120758950 2121183900 2121608850 2122033800 2122458750 2122883700 2123308650 2123733600 2124158550 2124583500 2125008450 2125433400 2125858350 2126283300 2126708250 2127133200 2127558150 2127983100 2128408050 2128833000 2129257950 
2129682900 2130107850 2130532800 2130957750 2131382700 2131807650 2132232600 2132657550 2133082500 2133507450 2133932400 2134357350 2134782300 2135207250 2135632200 2136057150 2136482100 2136907050 2137332000 2137756950 2138181900 2138606850 2139031800 2139456750 2139881700 2140306650 2140731600 2141156550 2141581500 2142006450 2142431400 2142856350 2143281300 2143706250 2144131200 2144556150 2144981100 2145406050 2145831000 2146255950 2146680900 2147105850 2147530800 2147955750 2148380700 2148805650 2149230600 2149655550 2150080500 2150505450 2150930400 2151355350 2151780300 2152205250 2152630200 2153055150 2153480100 2153905050 2161335000 2161769950 2162204900 2162639850 2163074800 2163509750 2163944700 2164379650 2164814600 2165249550 2165684500 2166119450 2166554400 2166989350 2167424300 2167859250 2168294200 2168729150 2169164100 2169599050 2170034000 2170468950 2170903900 2171338850 2171773800 2172208750 2172643700 2173078650 2173513600 2173948550 2174383500 2174818450 2175253400 2175688350 2176123300 2176558250 2176993200 2177428150 2177863100 2178298050 2178733000 2179167950 2179602900 2180037850 2180472800 2180907750 2181342700 2181777650 2182212600 2182647550 2183082500 2183517450 2183952400 2184387350 2184822300 2185257250 2185692200 2186127150 2186562100 2186997050 2187432000 2187866950 2188301900 2188736850 2189171800 2189606750 2190041700 2190476650 2190911600 2191346550 2191781500 2192216450 2192651400 2193086350 2193521300 2193956250 2194391200 2194826150 2195261100 2195696050 2196131000 2196565950 2197000900 2197435850 2197870800 2198305750 2198740700 2199175650 2199610600 2200045550 2200480500 2200915450 2201350400 2201785350 2202220300 2202655250 2203090200 2203525150 2203960100 2204395050 2210835000 2211279950 2211724900 2212169850 2212614800 2213059750 2213504700 2213949650 2214394600 2214839550 2215284500 2215729450 2216174400 2216619350 2217064300 2217509250 2217954200 2218399150 2218844100 2219289050 2219734000 2220178950 2220623900 2221068850 2221513800 2221958750 2222403700 2222848650 2223293600 2223738550 2224183500 2224628450 2225073400 2225518350 2225963300 2226408250 2226853200 2227298150 2227743100 2228188050 2228633000 2229077950 2229522900 2229967850 2230412800 2230857750 2231302700 2231747650 2232192600 2232637550 2233082500 2233527450 2233972400 2234417350 2234862300 2235307250 2235752200 2236197150 2236642100 2237087050 2237532000 2237976950 2238421900 2238866850 2239311800 2239756750 2240201700 2240646650 2241091600 2241536550 2241981500 2242426450 2242871400 2243316350 2243761300 2244206250 2244651200 2245096150 2245541100 2245986050 2246431000 2246875950 2247320900 2247765850 2248210800 2248655750 2249100700 2249545650 2249990600 2250435550 2250880500 2251325450 2251770400 2252215350 2252660300 2253105250 2253550200 2253995150 2254440100 2254885050 2260335000 2260789950 2261244900 2261699850 2262154800 2262609750 2263064700 2263519650 2263974600 2264429550 2264884500 2265339450 2265794400 2266249350 2266704300 2267159250 2267614200 2268069150 2268524100 2268979050 2269434000 2269888950 2270343900 2270798850 2271253800 2271708750 2272163700 2272618650 2273073600 2273528550 2273983500 2274438450 2274893400 2275348350 2275803300 2276258250 2276713200 2277168150 2277623100 2278078050 2278533000 2278987950 2279442900 2279897850 2280352800 2280807750 2281262700 2281717650 2282172600 2282627550 2283082500 2283537450 2283992400 2284447350 2284902300 2285357250 2285812200 2286267150 2286722100 2287177050 2287632000 2288086950 2288541900 2288996850 2289451800 
2289906750 2290361700 2290816650 2291271600 2291726550 2292181500 2292636450 2293091400 2293546350 2294001300 2294456250 2294911200 2295366150 2295821100 2296276050 2296731000 2297185950 2297640900 2298095850 2298550800 2299005750 2299460700 2299915650 2300370600 2300825550 2301280500 2301735450 2302190400 2302645350 2303100300 2303555250 2304010200 2304465150 2304920100 2305375050 2309835000 2310299950 2310764900 2311229850 2311694800 2312159750 2312624700 2313089650 2313554600 2314019550 2314484500 2314949450 2315414400 2315879350 2316344300 2316809250 2317274200 2317739150 2318204100 2318669050 2319134000 2319598950 2320063900 2320528850 2320993800 2321458750 2321923700 2322388650 2322853600 2323318550 2323783500 2324248450 2324713400 2325178350 2325643300 2326108250 2326573200 2327038150 2327503100 2327968050 2328433000 2328897950 2329362900 2329827850 2330292800 2330757750 2331222700 2331687650 2332152600 2332617550 2333082500 2333547450 2334012400 2334477350 2334942300 2335407250 2335872200 2336337150 2336802100 2337267050 2337732000 2338196950 2338661900 2339126850 2339591800 2340056750 2340521700 2340986650 2341451600 2341916550 2342381500 2342846450 2343311400 2343776350 2344241300 2344706250 2345171200 2345636150 2346101100 2346566050 2347031000 2347495950 2347960900 2348425850 2348890800 2349355750 2349820700 2350285650 2350750600 2351215550 2351680500 2352145450 2352610400 2353075350 2353540300 2354005250 2354470200 2354935150 2355400100 2355865050 2359335000 2359809950 2360284900 2360759850 2361234800 2361709750 2362184700 2362659650 2363134600 2363609550 2364084500 2364559450 2365034400 2365509350 2365984300 2366459250 2366934200 2367409150 2367884100 2368359050 2368834000 2369308950 2369783900 2370258850 2370733800 2371208750 2371683700 2372158650 2372633600 2373108550 2373583500 2374058450 2374533400 2375008350 2375483300 2375958250 2376433200 2376908150 2377383100 2377858050 2378333000 2378807950 2379282900 2379757850 2380232800 2380707750 2381182700 2381657650 2382132600 2382607550 2383082500 2383557450 2384032400 2384507350 2384982300 2385457250 2385932200 2386407150 2386882100 2387357050 2387832000 2388306950 2388781900 2389256850 2389731800 2390206750 2390681700 2391156650 2391631600 2392106550 2392581500 2393056450 2393531400 2394006350 2394481300 2394956250 2395431200 2395906150 2396381100 2396856050 2397331000 2397805950 2398280900 2398755850 2399230800 2399705750 2400180700 2400655650 2401130600 2401605550 2402080500 2402555450 2403030400 2403505350 2403980300 2404455250 2404930200 2405405150 2405880100 2406355050 2408835000 2409319950 2409804900 2410289850 2410774800 2411259750 2411744700 2412229650 2412714600 2413199550 2413684500 2414169450 2414654400 2415139350 2415624300 2416109250 2416594200 2417079150 2417564100 2418049050 2418534000 2419018950 2419503900 2419988850 2420473800 2420958750 2421443700 2421928650 2422413600 2422898550 2423383500 2423868450 2424353400 2424838350 2425323300 2425808250 2426293200 2426778150 2427263100 2427748050 2428233000 2428717950 2429202900 2429687850 2430172800 2430657750 2431142700 2431627650 2432112600 2432597550 2433082500 2433567450 2434052400 2434537350 2435022300 2435507250 2435992200 2436477150 2436962100 2437447050 2437932000 2438416950 2438901900 2439386850 2439871800 2440356750 2440841700 2441326650 2441811600 2442296550 2442781500 2443266450 2443751400 2444236350 2444721300 2445206250 2445691200 2446176150 2446661100 2447146050 2447631000 2448115950 2448600900 2449085850 2449570800 2450055750 2450540700 2451025650 
2451510600 2451995550 2452480500 2452965450 2453450400 2453935350 2454420300 2454905250 2455390200 2455875150 2456360100 2456845050 2458335000 2458829950 2459324900 2459819850 2460314800 2460809750 2461304700 2461799650 2462294600 2462789550 2463284500 2463779450 2464274400 2464769350 2465264300 2465759250 2466254200 2466749150 2467244100 2467739050 2468234000 2468728950 2469223900 2469718850 2470213800 2470708750 2471203700 2471698650 2472193600 2472688550 2473183500 2473678450 2474173400 2474668350 2475163300 2475658250 2476153200 2476648150 2477143100 2477638050 2478133000 2478627950 2479122900 2479617850 2480112800 2480607750 2481102700 2481597650 2482092600 2482587550 2483082500 2483577450 2484072400 2484567350 2485062300 2485557250 2486052200 2486547150 2487042100 2487537050 2488032000 2488526950 2489021900 2489516850 2490011800 2490506750 2491001700 2491496650 2491991600 2492486550 2492981500 2493476450 2493971400 2494466350 2494961300 2495456250 2495951200 2496446150 2496941100 2497436050 2497931000 2498425950 2498920900 2499415850 2499910800 2500405750 2500900700 2501395650 2501890600 2502385550 2502880500 2503375450 2503870400 2504365350 2504860300 2505355250 2505850200 2506345150 2506840100 2507335050 2507835000 2508339950 2508844900 2509349850 2509854800 2510359750 2510864700 2511369650 2511874600 2512379550 2512884500 2513389450 2513894400 2514399350 2514904300 2515409250 2515914200 2516419150 2516924100 2517429050 2517934000 2518438950 2518943900 2519448850 2519953800 2520458750 2520963700 2521468650 2521973600 2522478550 2522983500 2523488450 2523993400 2524498350 2525003300 2525508250 2526013200 2526518150 2527023100 2527528050 2528033000 2528537950 2529042900 2529547850 2530052800 2530557750 2531062700 2531567650 2532072600 2532577550 2533082500 2533587450 2534092400 2534597350 2535102300 2535607250 2536112200 2536617150 2537122100 2537627050 2538132000 2538636950 2539141900 2539646850 2540151800 2540656750 2541161700 2541666650 2542171600 2542676550 2543181500 2543686450 2544191400 2544696350 2545201300 2545706250 2546211200 2546716150 2547221100 2547726050 2548231000 2548735950 2549240900 2549745850 2550250800 2550755750 2551260700 2551765650 2552270600 2552775550 2553280500 2553785450 2554290400 2554795350 2555300300 2555805250 2556310200 2556815150 2557320100 2557825050 2557335000 2557849950 2558364900 2558879850 2559394800 2559909750 2560424700 2560939650 2561454600 2561969550 2562484500 2562999450 2563514400 2564029350 2564544300 2565059250 2565574200 2566089150 2566604100 2567119050 2567634000 2568148950 2568663900 2569178850 2569693800 2570208750 2570723700 2571238650 2571753600 2572268550 2572783500 2573298450 2573813400 2574328350 2574843300 2575358250 2575873200 2576388150 2576903100 2577418050 2577933000 2578447950 2578962900 2579477850 2579992800 2580507750 2581022700 2581537650 2582052600 2582567550 2583082500 2583597450 2584112400 2584627350 2585142300 2585657250 2586172200 2586687150 2587202100 2587717050 2588232000 2588746950 2589261900 2589776850 2590291800 2590806750 2591321700 2591836650 2592351600 2592866550 2593381500 2593896450 2594411400 2594926350 2595441300 2595956250 2596471200 2596986150 2597501100 2598016050 2598531000 2599045950 2599560900 2600075850 2600590800 2601105750 2601620700 2602135650 2602650600 2603165550 2603680500 2604195450 2604710400 2605225350 2605740300 2606255250 2606770200 2607285150 2607800100 2608315050 2606835000 2607359950 2607884900 2608409850 2608934800 2609459750 2609984700 2610509650 2611034600 2611559550 2612084500 
2612609450 2613134400 2613659350 2614184300 2614709250 2615234200 2615759150 2616284100 2616809050 2617334000 2617858950 2618383900 2618908850 2619433800 2619958750 2620483700 2621008650 2621533600 2622058550 2622583500 2623108450 2623633400 2624158350 2624683300 2625208250 2625733200 2626258150 2626783100 2627308050 2627833000 2628357950 2628882900 2629407850 2629932800 2630457750 2630982700 2631507650 2632032600 2632557550 2633082500 2633607450 2634132400 2634657350 2635182300 2635707250 2636232200 2636757150 2637282100 2637807050 2638332000 2638856950 2639381900 2639906850 2640431800 2640956750 2641481700 2642006650 2642531600 2643056550 2643581500 2644106450 2644631400 2645156350 2645681300 2646206250 2646731200 2647256150 2647781100 2648306050 2648831000 2649355950 2649880900 2650405850 2650930800 2651455750 2651980700 2652505650 2653030600 2653555550 2654080500 2654605450 2655130400 2655655350 2656180300 2656705250 2657230200 2657755150 2658280100 2658805050 2656335000 2656869950 2657404900 2657939850 2658474800 2659009750 2659544700 2660079650 2660614600 2661149550 2661684500 2662219450 2662754400 2663289350 2663824300 2664359250 2664894200 2665429150 2665964100 2666499050 2667034000 2667568950 2668103900 2668638850 2669173800 2669708750 2670243700 2670778650 2671313600 2671848550 2672383500 2672918450 2673453400 2673988350 2674523300 2675058250 2675593200 2676128150 2676663100 2677198050 2677733000 2678267950 2678802900 2679337850 2679872800 2680407750 2680942700 2681477650 2682012600 2682547550 2683082500 2683617450 2684152400 2684687350 2685222300 2685757250 2686292200 2686827150 2687362100 2687897050 2688432000 2688966950 2689501900 2690036850 2690571800 2691106750 2691641700 2692176650 2692711600 2693246550 2693781500 2694316450 2694851400 2695386350 2695921300 2696456250 2696991200 2697526150 2698061100 2698596050 2699131000 2699665950 2700200900 2700735850 2701270800 2701805750 2702340700 2702875650 2703410600 2703945550 2704480500 2705015450 2705550400 2706085350 2706620300 2707155250 2707690200 2708225150 2708760100 2709295050 2705835000 2706379950 2706924900 2707469850 2708014800 2708559750 2709104700 2709649650 2710194600 2710739550 2711284500 2711829450 2712374400 2712919350 2713464300 2714009250 2714554200 2715099150 2715644100 2716189050 2716734000 2717278950 2717823900 2718368850 2718913800 2719458750 2720003700 2720548650 2721093600 2721638550 2722183500 2722728450 2723273400 2723818350 2724363300 2724908250 2725453200 2725998150 2726543100 2727088050 2727633000 2728177950 2728722900 2729267850 2729812800 2730357750 2730902700 2731447650 2731992600 2732537550 2733082500 2733627450 2734172400 2734717350 2735262300 2735807250 2736352200 2736897150 2737442100 2737987050 2738532000 2739076950 2739621900 2740166850 2740711800 2741256750 2741801700 2742346650 2742891600 2743436550 2743981500 2744526450 2745071400 2745616350 2746161300 2746706250 2747251200 2747796150 2748341100 2748886050 2749431000 2749975950 2750520900 2751065850 2751610800 2752155750 2752700700 2753245650 2753790600 2754335550 2754880500 2755425450 2755970400 2756515350 2757060300 2757605250 2758150200 2758695150 2759240100 2759785050 2755335000 2755889950 2756444900 2756999850 2757554800 2758109750 2758664700 2759219650 2759774600 2760329550 2760884500 2761439450 2761994400 2762549350 2763104300 2763659250 2764214200 2764769150 2765324100 2765879050 2766434000 2766988950 2767543900 2768098850 2768653800 2769208750 2769763700 2770318650 2770873600 2771428550 2771983500 2772538450 2773093400 2773648350 
2774203300 2774758250 2775313200 2775868150 2776423100 2776978050 2777533000 2778087950 2778642900 2779197850 2779752800 2780307750 2780862700 2781417650 2781972600 2782527550 2783082500 2783637450 2784192400 2784747350 2785302300 2785857250 2786412200 2786967150 2787522100 2788077050 2788632000 2789186950 2789741900 2790296850 2790851800 2791406750 2791961700 2792516650 2793071600 2793626550 2794181500 2794736450 2795291400 2795846350 2796401300 2796956250 2797511200 2798066150 2798621100 2799176050 2799731000 2800285950 2800840900 2801395850 2801950800 2802505750 2803060700 2803615650 2804170600 2804725550 2805280500 2805835450 2806390400 2806945350 2807500300 2808055250 2808610200 2809165150 2809720100 2810275050 2804835000 2805399950 2805964900 2806529850 2807094800 2807659750 2808224700 2808789650 2809354600 2809919550 2810484500 2811049450 2811614400 2812179350 2812744300 2813309250 2813874200 2814439150 2815004100 2815569050 2816134000 2816698950 2817263900 2817828850 2818393800 2818958750 2819523700 2820088650 2820653600 2821218550 2821783500 2822348450 2822913400 2823478350 2824043300 2824608250 2825173200 2825738150 2826303100 2826868050 2827433000 2827997950 2828562900 2829127850 2829692800 2830257750 2830822700 2831387650 2831952600 2832517550 2833082500 2833647450 2834212400 2834777350 2835342300 2835907250 2836472200 2837037150 2837602100 2838167050 2838732000 2839296950 2839861900 2840426850 2840991800 2841556750 2842121700 2842686650 2843251600 2843816550 2844381500 2844946450 2845511400 2846076350 2846641300 2847206250 2847771200 2848336150 2848901100 2849466050 2850031000 2850595950 2851160900 2851725850 2852290800 2852855750 2853420700 2853985650 2854550600 2855115550 2855680500 2856245450 2856810400 2857375350 2857940300 2858505250 2859070200 2859635150 2860200100 2860765050 2854335000 2854909950 2855484900 2856059850 2856634800 2857209750 2857784700 2858359650 2858934600 2859509550 2860084500 2860659450 2861234400 2861809350 2862384300 2862959250 2863534200 2864109150 2864684100 2865259050 2865834000 2866408950 2866983900 2867558850 2868133800 2868708750 2869283700 2869858650 2870433600 2871008550 2871583500 2872158450 2872733400 2873308350 2873883300 2874458250 2875033200 2875608150 2876183100 2876758050 2877333000 2877907950 2878482900 2879057850 2879632800 2880207750 2880782700 2881357650 2881932600 2882507550 2883082500 2883657450 2884232400 2884807350 2885382300 2885957250 2886532200 2887107150 2887682100 2888257050 2888832000 2889406950 2889981900 2890556850 2891131800 2891706750 2892281700 2892856650 2893431600 2894006550 2894581500 2895156450 2895731400 2896306350 2896881300 2897456250 2898031200 2898606150 2899181100 2899756050 2900331000 2900905950 2901480900 2902055850 2902630800 2903205750 2903780700 2904355650 2904930600 2905505550 2906080500 2906655450 2907230400 2907805350 2908380300 2908955250 2909530200 2910105150 2910680100 2911255050 2903835000 2904419950 2905004900 2905589850 2906174800 2906759750 2907344700 2907929650 2908514600 2909099550 2909684500 2910269450 2910854400 2911439350 2912024300 2912609250 2913194200 2913779150 2914364100 2914949050 2915534000 2916118950 2916703900 2917288850 2917873800 2918458750 2919043700 2919628650 2920213600 2920798550 2921383500 2921968450 2922553400 2923138350 2923723300 2924308250 2924893200 2925478150 2926063100 2926648050 2927233000 2927817950 2928402900 2928987850 2929572800 2930157750 2930742700 2931327650 2931912600 2932497550 2933082500 2933667450 2934252400 2934837350 2935422300 2936007250 2936592200 
4866508350 4867483300 4868458250 4869433200 4870408150 4871383100 4872358050 4873333000 4874307950 4875282900 4876257850 4877232800 4878207750 4879182700 4880157650 4881132600 4882107550 4883082500 4884057450 4885032400 4886007350 4886982300 4887957250 4888932200 4889907150 4890882100 4891857050 4892832000 4893806950 4894781900 4895756850 4896731800 4897706750 4898681700 4899656650 4900631600 4901606550 4902581500 4903556450 4904531400 4905506350 4906481300 4907456250 4908431200 4909406150 4910381100 4911356050 4912331000 4913305950 4914280900 4915255850 4916230800 4917205750 4918180700 4919155650 4920130600 4921105550 4922080500 4923055450 4924030400 4925005350 4925980300 4926955250 4927930200 4928905150 4929880100 4930855050 4883835000 4884819950 4885804900 4886789850 4887774800 4888759750 4889744700 4890729650 4891714600 4892699550 4893684500 4894669450 4895654400 4896639350 4897624300 4898609250 4899594200 4900579150 4901564100 4902549050 4903534000 4904518950 4905503900 4906488850 4907473800 4908458750 4909443700 4910428650 4911413600 4912398550 4913383500 4914368450 4915353400 4916338350 4917323300 4918308250 4919293200 4920278150 4921263100 4922248050 4923233000 4924217950 4925202900 4926187850 4927172800 4928157750 4929142700 4930127650 4931112600 4932097550 4933082500 4934067450 4935052400 4936037350 4937022300 4938007250 4938992200 4939977150 4940962100 4941947050 4942932000 4943916950 4944901900 4945886850 4946871800 4947856750 4948841700 4949826650 4950811600 4951796550 4952781500 4953766450 4954751400 4955736350 4956721300 4957706250 4958691200 4959676150 4960661100 4961646050 4962631000 4963615950 4964600900 4965585850 4966570800 4967555750 4968540700 4969525650 4970510600 4971495550 4972480500 4973465450 4974450400 4975435350 4976420300 4977405250 4978390200 4979375150 4980360100 4981345050 4933335000 4934329950 4935324900 4936319850 4937314800 4938309750 4939304700 4940299650 4941294600 4942289550 4943284500 4944279450 4945274400 4946269350 4947264300 4948259250 4949254200 4950249150 4951244100 4952239050 4953234000 4954228950 4955223900 4956218850 4957213800 4958208750 4959203700 4960198650 4961193600 4962188550 4963183500 4964178450 4965173400 4966168350 4967163300 4968158250 4969153200 4970148150 4971143100 4972138050 4973133000 4974127950 4975122900 4976117850 4977112800 4978107750 4979102700 4980097650 4981092600 4982087550 4983082500 4984077450 4985072400 4986067350 4987062300 4988057250 4989052200 4990047150 4991042100 4992037050 4993032000 4994026950 4995021900 4996016850 4997011800 4998006750 4999001700 4999996650 5000991600 5001986550 5002981500 5003976450 5004971400 5005966350 5006961300 5007956250 5008951200 5009946150 5010941100 5011936050 5012931000 5013925950 5014920900 5015915850 5016910800 5017905750 5018900700 5019895650 5020890600 5021885550 5022880500 5023875450 5024870400 5025865350 5026860300 5027855250 5028850200 5029845150 5030840100 5031835050 ==13282== Profiling application: ./a.out ==13282== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 94.72% 2.5322ms 1 2.5322ms 2.5322ms 2.5322ms matproductsharedmemory(__int64*, __int64*, __int64*) 3.68% 98.338us 2 49.169us 49.025us 49.313us [CUDA memcpy HtoD] 1.61% 42.913us 1 42.913us 42.913us 42.913us [CUDA memcpy DtoH] API calls: 98.22% 189.54ms 3 63.178ms 5.3290us 189.52ms cudaMalloc 1.43% 2.7661ms 3 922.02us 26.698us 2.6712ms cudaMemcpy 0.19% 361.76us 94 3.8480us 170ns 233.68us cuDeviceGetAttribute 0.08% 150.22us 3 50.073us 6.2080us 110.67us cudaFree 0.05% 89.941us 1 89.941us 
89.941us 89.941us cuDeviceTotalMem 0.01% 27.216us 1 27.216us 27.216us 27.216us cuDeviceGetName 0.01% 24.939us 1 24.939us 24.939us 24.939us cudaLaunch 0.00% 2.2690us 3 756ns 186ns 1.7650us cuDeviceGetCount 0.00% 1.0820us 2 541ns 239ns 843ns cuDeviceGet 0.00% 955ns 3 318ns 172ns 542ns cudaSetupArgument 0.00% 724ns 1 724ns 724ns 724ns cudaConfigureCall */
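Note on the profile above (not part of the original file): the dominant GPU activity is a kernel named matproductsharedmemory(__int64*, __int64*, __int64*), whose source is not included in this entry; the output is what nvprof prints for the profiled application ./a.out. The sketch below only illustrates what a shared-memory (tiled) matrix product with that signature typically looks like; the matrix size N, the tile size, the launch configuration and the use of long long for __int64 are assumptions, not the benchmarked implementation.

// Illustrative sketch only (assumed implementation, not the profiled source).
// __int64 in the profile corresponds to long long here; N and TILE are assumptions.
#include <cuda_runtime.h>

#define N    1024   // assumed square matrix dimension, multiple of TILE
#define TILE 16     // assumed tile width

__global__ void matproductsharedmemory( long long* A, long long* B, long long* C )
{
	__shared__ long long sA[TILE][TILE];  // tile of A staged in shared memory
	__shared__ long long sB[TILE][TILE];  // tile of B staged in shared memory
	const int row = blockIdx.y * TILE + threadIdx.y;
	const int col = blockIdx.x * TILE + threadIdx.x;
	long long acc = 0;
	for (int t = 0; t < N / TILE; t++)
	{
		// each thread stages one element of the current A-tile and B-tile
		sA[threadIdx.y][threadIdx.x] = A[row * N + t * TILE + threadIdx.x];
		sB[threadIdx.y][threadIdx.x] = B[(t * TILE + threadIdx.y) * N + col];
		__syncthreads();
		// partial dot product over the staged tiles
		for (int k = 0; k < TILE; k++) acc += sA[threadIdx.y][k] * sB[k][threadIdx.x];
		__syncthreads();
	}
	C[row * N + col] = acc;
}

// assumed launch configuration:
//   dim3 grid( N / TILE, N / TILE ), block( TILE, TILE );
//   matproductsharedmemory<<<grid, block>>>( dA, dB, dC );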
60402d033dd6d21cf0c875ecfd16959e323af644.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* .optix.cu - Copyright 2019 Utrecht University
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   This file contains a minimal set of Optix functions. From here we will
   dispatch program flow to our own functions that implement the path tracer.
*/

#include "../kernels/noerrors.h"
#include "helper_math.h"

// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"

// global path tracing parameters
extern "C" { __constant__ Params params; }

// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }

static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
	// Adapated from E. Heitz. Arguments:
	// sampleIndex: 0..255
	// sampleDimension: 0..255
	x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
	// xor index based on optimized ranking
	int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
	// fetch value in sequence
	int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
	// if the dimension is optimized, xor sequence value based on optimized scrambling
	value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
	// convert to float and return
	return (0.5f + value) * (1.0f / 256.0f);
}

static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
	const float blade = (int)(r0 * 9);
	float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
	float x1, y1, x2, y2;
	__sincosf( blade * PI / 4.5f, &x1, &y1 );
	__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
	if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
	const float xr = x1 * r1 + x2 * r2;
	const float yr = y1 * r1 + y2 * r2;
	float4 posLens = params.posLensSize;
	return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}

static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
	// random point on pixel and lens
	int sx = pixelIdx % params.scrsize.x;
	int sy = pixelIdx / params.scrsize.x;
	float r0, r1, r2, r3;
	if (sampleIdx < 256)
		r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ), r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ),
		r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ), r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 );
	else
		r0 = RandomFloat( seed ), r1 = RandomFloat( seed ), r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
	O = RandomPointOnLens( r2, r3 );
	float3 posOnPixel;
	if (params.distortion == 0)
	{
		const float u = ((float)sx + r0) * (1.0f / params.scrsize.x);
		const float v = ((float)sy + r1) * (1.0f / params.scrsize.y);
		posOnPixel = params.p1 + u * params.right + v * params.up;
	}
	else
	{
		const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f;
		const float rr = tx * tx + ty * ty;
		const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr);
		const float theta = atan2f( tx, ty );
		const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x;
		const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y;
		posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y);
	}
	D = normalize( posOnPixel - O );
}

#if __CUDA_ARCH__ >= 700
#define THREADMASK	__activemask() // volta, turing
#else
#define THREADMASK	0xffffffff // pascal, kepler, fermi
#endif

__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
	const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
	const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
	uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
	// generate eye ray
	float3 O, D;
	generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
	// populate path state array
	params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 8) + 1 /* S_SPECULAR in CUDA code */ ) );
	params.pathStates[pathIdx + stride] = make_float4( D, 0 );
	// trace eye ray
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}

__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
	const float4 O4 = params.pathStates[rayIdx];
	const float4 D4 = params.pathStates[rayIdx + stride];
	float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
	uint pixelIdx = __float_as_uint( O4.w ) >> 8;
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}

__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
	const float4 O4 = params.connectData[rayIdx]; // O4
	const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
	// launch shadow ray
	uint u0 = 1;
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
	if (u0) return;
	const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
	const int pixelIdx = __float_as_int( E4.w );
	if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}

extern "C" __global__ void __raygen__rg()
{
	const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
	const uint3 idx = optixGetLaunchIndex();
	if (params.phase == 0)
	{
		// primary rays
		setupPrimaryRay( idx.x + idx.y * params.scrsize.x, stride );
	}
	else if (params.phase == 1)
	{
		// secondary rays
		setupSecondaryRay( idx.x + idx.y * params.scrsize.x, stride );
	}
	else
	{
		// shadow rays
		generateShadowRay( idx.x + idx.y * params.scrsize.x, stride );
	}
}

extern "C" __global__ void __miss__occlusion()
{
	optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}

extern "C" __global__ void __closesthit__radiance()
{
	const uint prim_idx = optixGetPrimitiveIndex();
	const uint inst_idx = optixGetInstanceIndex();
	const float2 bary = optixGetTriangleBarycentrics();
	const float tmin = optixGetRayTmax();
	optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
	optixSetPayload_1( inst_idx );
	optixSetPayload_2( prim_idx );
	optixSetPayload_3( __float_as_uint( tmin ) );
}

// EOF
60402d033dd6d21cf0c875ecfd16959e323af644.cu
/* .optix.cu - Copyright 2019 Utrecht University
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   This file contains a minimal set of Optix functions. From here we will
   dispatch program flow to our own functions that implement the path tracer.
*/

#include "../kernels/noerrors.h"
#include "helper_math.h"

// global include files
#include "../../RenderSystem/common_settings.h"
#include "../../RenderSystem/common_types.h"
#define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here
#include "../core_settings.h"

// global path tracing parameters
extern "C" { __constant__ Params params; }

// tools
__device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; }
__device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; }
__device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; }

static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension )
{
	// Adapated from E. Heitz. Arguments:
	// sampleIndex: 0..255
	// sampleDimension: 0..255
	x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255;
	// xor index based on optimized ranking
	int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255;
	// fetch value in sequence
	int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256];
	// if the dimension is optimized, xor sequence value based on optimized scrambling
	value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536];
	// convert to float and return
	return (0.5f + value) * (1.0f / 256.0f);
}

static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 )
{
	const float blade = (int)(r0 * 9);
	float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f;
	float x1, y1, x2, y2;
	__sincosf( blade * PI / 4.5f, &x1, &y1 );
	__sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 );
	if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2;
	const float xr = x1 * r1 + x2 * r2;
	const float yr = y1 * r1 + y2 * r2;
	float4 posLens = params.posLensSize;
	return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr);
}

static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed )
{
	// random point on pixel and lens
	int sx = pixelIdx % params.scrsize.x;
	int sy = pixelIdx / params.scrsize.x;
	float r0, r1, r2, r3;
	if (sampleIdx < 256)
		r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ), r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ),
		r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ), r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 );
	else
		r0 = RandomFloat( seed ), r1 = RandomFloat( seed ), r2 = RandomFloat( seed ), r3 = RandomFloat( seed );
	O = RandomPointOnLens( r2, r3 );
	float3 posOnPixel;
	if (params.distortion == 0)
	{
		const float u = ((float)sx + r0) * (1.0f / params.scrsize.x);
		const float v = ((float)sy + r1) * (1.0f / params.scrsize.y);
		posOnPixel = params.p1 + u * params.right + v * params.up;
	}
	else
	{
		const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f;
		const float rr = tx * tx + ty * ty;
		const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr);
		const float theta = atan2f( tx, ty );
		const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x;
		const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y;
		posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y);
	}
	D = normalize( posOnPixel - O );
}

#if __CUDA_ARCH__ >= 700
#define THREADMASK	__activemask() // volta, turing
#else
#define THREADMASK	0xffffffff // pascal, kepler, fermi
#endif

__device__ void setupPrimaryRay( const uint pathIdx, const uint stride )
{
	const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y);
	const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass;
	uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 );
	// generate eye ray
	float3 O, D;
	generateEyeRay( O, D, pixelIdx, sampleIdx, seed );
	// populate path state array
	params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 8) + 1 /* S_SPECULAR in CUDA code */ ) );
	params.pathStates[pathIdx + stride] = make_float4( D, 0 );
	// trace eye ray
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}

__device__ void setupSecondaryRay( const uint rayIdx, const uint stride )
{
	const float4 O4 = params.pathStates[rayIdx];
	const float4 D4 = params.pathStates[rayIdx + stride];
	float4 result = make_float4( 0, 0, __int_as_float( -1 ), 0 );
	uint pixelIdx = __float_as_uint( O4.w ) >> 8;
	uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f );
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 );
	params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) );
}

__device__ void generateShadowRay( const uint rayIdx, const uint stride )
{
	const float4 O4 = params.connectData[rayIdx]; // O4
	const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4
	// launch shadow ray
	uint u0 = 1;
	optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ),
		OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 );
	if (u0) return;
	const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4
	const int pixelIdx = __float_as_int( E4.w );
	if (pixelIdx < stride /* OptiX bug workaround? */) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 );
}

extern "C" __global__ void __raygen__rg()
{
	const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z;
	const uint3 idx = optixGetLaunchIndex();
	if (params.phase == 0)
	{
		// primary rays
		setupPrimaryRay( idx.x + idx.y * params.scrsize.x, stride );
	}
	else if (params.phase == 1)
	{
		// secondary rays
		setupSecondaryRay( idx.x + idx.y * params.scrsize.x, stride );
	}
	else
	{
		// shadow rays
		generateShadowRay( idx.x + idx.y * params.scrsize.x, stride );
	}
}

extern "C" __global__ void __miss__occlusion()
{
	optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io.
}

extern "C" __global__ void __closesthit__radiance()
{
	const uint prim_idx = optixGetPrimitiveIndex();
	const uint inst_idx = optixGetInstanceIndex();
	const float2 bary = optixGetTriangleBarycentrics();
	const float tmin = optixGetRayTmax();
	optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) );
	optixSetPayload_1( inst_idx );
	optixSetPayload_2( prim_idx );
	optixSetPayload_3( __float_as_uint( tmin ) );
}

// EOF
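Aside (not part of either file above): __closesthit__radiance packs the two triangle barycentrics into payload 0 as a pair of 16-bit fixed-point values. Below is a minimal sketch of the matching unpack step, assuming the consumer simply reverses that scaling; the function name is hypothetical, and the renderer's real shade code lives elsewhere.

// Illustrative sketch only: reverse of the packing in __closesthit__radiance.
__device__ inline float2 unpackBarycentrics( unsigned int packed )
{
	const float u = (packed & 65535) * (1.0f / 65535.0f); // bary.x, stored in the low 16 bits
	const float v = (packed >> 16) * (1.0f / 65535.0f);   // bary.y, stored in the high 16 bits
	return make_float2( u, v );
}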
5bc4ff049d602ee4152d8c5d371652f7b366d3b0.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>

#include <debug.h>

#include "lstm_cuda.h"
#include "lstm_private_cuda.h"

int lstm_clone_to_cuda(lstm_cuda_t* lstmCudaPtr, lstm_t lstm)
{
	int i, j;
	int vecLen, nodeCount;
	int ret = LSTM_NO_ERROR;
	lstm_cuda_t tmpLstmCuda = NULL;

	struct LSTM_CULAYER* dstLayerRef;
	struct LSTM_LAYER* srcLayerRef;
	struct LSTM_CONFIG_STRUCT* cfgRef;

	LOG("enter");

	// Get reference
	cfgRef = &lstm->config;

	// Create lstm cuda
	lstm_run(lstm_create_cuda(&tmpLstmCuda, cfgRef), ret, RET);

#define __lstm_clone_base_to_cuda(cuMat, base) \
	{ \
		int procIndex = cuMat * (nodeCount * vecLen) + j * vecLen; \
		if(hipMemcpy(&dstLayerRef[i].baseMat.weight[procIndex], \
					srcLayerRef[i].nodeList[j].base.weight, \
					srcLayerRef[i - 1].nodeCount * sizeof(float), \
					hipMemcpyHostToDevice) \
				!= hipSuccess) \
		{ \
			ret = LSTM_MEM_FAILED; \
			goto ERR; \
		} \
		else \
		{ \
			procIndex += srcLayerRef[i - 1].nodeCount; \
		} \
		\
		if(i == 1) \
		{ \
			if(hipMemcpy(&dstLayerRef[i].baseMat.weight[procIndex], \
						srcLayerRef[i].nodeList[j].base.rWeight, \
						srcLayerRef[cfgRef->layers - 2].nodeCount * sizeof(float), \
						hipMemcpyHostToDevice) \
					!= hipSuccess) \
			{ \
				ret = LSTM_MEM_FAILED; \
				goto ERR; \
			} \
			else \
			{ \
				procIndex += srcLayerRef[cfgRef->layers - 2].nodeCount; \
			} \
		} \
		\
		if(hipMemcpy(&dstLayerRef[i].baseMat.weight[procIndex], \
					&srcLayerRef[i].nodeList[j].base.th, \
					sizeof(float), \
					hipMemcpyHostToDevice) \
				!= hipSuccess) \
		{ \
			ret = LSTM_MEM_FAILED; \
			goto ERR; \
		} \
	}

	// Clone weight
	dstLayerRef = tmpLstmCuda->layerList;
	srcLayerRef = lstm->layerList;
	for(i = 1; i < cfgRef->layers; i++)
	{
		// Get node count and vector length
		nodeCount = srcLayerRef[i].nodeCount;
		vecLen = dstLayerRef[i].vecLen;

		// Clone node to cuda
		for(j = 0; j < nodeCount; j++)
		{
			// Clone gate network
			if(i < cfgRef->layers - 1)
			{
				__lstm_clone_base_to_cuda(LSTM_CUMAT_OG, ogNet);
				__lstm_clone_base_to_cuda(LSTM_CUMAT_FG, fgNet);
				__lstm_clone_base_to_cuda(LSTM_CUMAT_IG, igNet);
			}

			// Clone input network
			__lstm_clone_base_to_cuda(LSTM_CUMAT_INPUT, inputNet);
		}
	}

	// Assign value
	*lstmCudaPtr = tmpLstmCuda;

	goto RET;

ERR:
	lstm_delete_cuda(tmpLstmCuda);

RET:
	LOG("exit");
	return ret;
}

int lstm_clone_from_cuda(lstm_t* lstmPtr, lstm_cuda_t lstmCuda)
{
	int i, j;
	int vecLen, nodeCount;
	int ret = LSTM_NO_ERROR;
	lstm_t tmpLstm = NULL;

	struct LSTM_CULAYER* srcLayerRef;
	struct LSTM_LAYER* dstLayerRef;
	struct LSTM_CONFIG_STRUCT* cfgRef;

	LOG("enter");

	// Get reference
	cfgRef = &lstmCuda->config;

	// Create lstm
	lstm_run(lstm_create(&tmpLstm, cfgRef), ret, RET);

#define __lstm_clone_cuda_to_base(base, cuMat) \
	{ \
		int procIndex = cuMat * (nodeCount * vecLen) + j * vecLen; \
		if(hipMemcpy(dstLayerRef[i].nodeList[j].base.weight, \
					&srcLayerRef[i].baseMat.weight[procIndex], \
					dstLayerRef[i - 1].nodeCount * sizeof(float), \
					hipMemcpyDeviceToHost) \
				!= hipSuccess) \
		{ \
			ret = LSTM_MEM_FAILED; \
			goto ERR; \
		} \
		else \
		{ \
			procIndex += dstLayerRef[i - 1].nodeCount; \
		} \
		\
		if(i == 1) \
		{ \
			if(hipMemcpy(dstLayerRef[i].nodeList[j].base.rWeight, \
						&srcLayerRef[i].baseMat.weight[procIndex], \
						dstLayerRef[cfgRef->layers - 2].nodeCount * sizeof(float), \
						hipMemcpyDeviceToHost) \
					!= hipSuccess) \
			{ \
				ret = LSTM_MEM_FAILED; \
				goto ERR; \
			} \
			else \
			{ \
				procIndex += dstLayerRef[cfgRef->layers - 2].nodeCount; \
			} \
		} \
		\
		if(hipMemcpy(&dstLayerRef[i].nodeList[j].base.th, \
					&srcLayerRef[i].baseMat.weight[procIndex], \
					sizeof(float), \
					hipMemcpyDeviceToHost) \
				!= hipSuccess) \
		{ \
			ret = LSTM_MEM_FAILED; \
			goto ERR; \
		} \
	}

	// Clone weight
	srcLayerRef = lstmCuda->layerList;
	dstLayerRef = tmpLstm->layerList;
	for(i = 1; i < cfgRef->layers; i++)
	{
		// Get node count and vector length
		nodeCount = dstLayerRef[i].nodeCount;
		vecLen = srcLayerRef[i].vecLen;

		// Clone cuda node to host
		for(j = 0; j < nodeCount; j++)
		{
			// Clone gate network
			if(i < cfgRef->layers - 1)
			{
				__lstm_clone_cuda_to_base(ogNet, LSTM_CUMAT_OG);
				__lstm_clone_cuda_to_base(fgNet, LSTM_CUMAT_FG);
				__lstm_clone_cuda_to_base(igNet, LSTM_CUMAT_IG);
			}

			// Clone input network
			__lstm_clone_cuda_to_base(inputNet, LSTM_CUMAT_INPUT);
		}
	}

	// Assign value
	*lstmPtr = tmpLstm;

	goto RET;

ERR:
	lstm_delete(tmpLstm);

RET:
	LOG("exit");
	return ret;
}
5bc4ff049d602ee4152d8c5d371652f7b366d3b0.cu
#include <cuda_runtime.h> #include <debug.h> #include "lstm_cuda.h" #include "lstm_private_cuda.h" int lstm_clone_to_cuda(lstm_cuda_t* lstmCudaPtr, lstm_t lstm) { int i, j; int vecLen, nodeCount; int ret = LSTM_NO_ERROR; lstm_cuda_t tmpLstmCuda = NULL; struct LSTM_CULAYER* dstLayerRef; struct LSTM_LAYER* srcLayerRef; struct LSTM_CONFIG_STRUCT* cfgRef; LOG("enter"); // Get reference cfgRef = &lstm->config; // Create lstm cuda lstm_run(lstm_create_cuda(&tmpLstmCuda, cfgRef), ret, RET); #define __lstm_clone_base_to_cuda(cuMat, base) \ { \ int procIndex = cuMat * (nodeCount * vecLen) + j * vecLen; \ if(cudaMemcpy(&dstLayerRef[i].baseMat.weight[procIndex], \ srcLayerRef[i].nodeList[j].base.weight, \ srcLayerRef[i - 1].nodeCount * sizeof(float), \ cudaMemcpyHostToDevice) \ != cudaSuccess) \ { \ ret = LSTM_MEM_FAILED; \ goto ERR; \ } \ else \ { \ procIndex += srcLayerRef[i - 1].nodeCount; \ } \ \ if(i == 1) \ { \ if(cudaMemcpy(&dstLayerRef[i].baseMat.weight[procIndex], \ srcLayerRef[i].nodeList[j].base.rWeight, \ srcLayerRef[cfgRef->layers - 2].nodeCount * sizeof(float), \ cudaMemcpyHostToDevice) \ != cudaSuccess) \ { \ ret = LSTM_MEM_FAILED; \ goto ERR; \ } \ else \ { \ procIndex += srcLayerRef[cfgRef->layers - 2].nodeCount; \ } \ } \ \ if(cudaMemcpy(&dstLayerRef[i].baseMat.weight[procIndex], \ &srcLayerRef[i].nodeList[j].base.th, \ sizeof(float), \ cudaMemcpyHostToDevice) \ != cudaSuccess) \ { \ ret = LSTM_MEM_FAILED; \ goto ERR; \ } \ } // Clone weight dstLayerRef = tmpLstmCuda->layerList; srcLayerRef = lstm->layerList; for(i = 1; i < cfgRef->layers; i++) { // Get node count and vector length nodeCount = srcLayerRef[i].nodeCount; vecLen = dstLayerRef[i].vecLen; // Clone node to cuda for(j = 0; j < nodeCount; j++) { // Clone gate network if(i < cfgRef->layers - 1) { __lstm_clone_base_to_cuda(LSTM_CUMAT_OG, ogNet); __lstm_clone_base_to_cuda(LSTM_CUMAT_FG, fgNet); __lstm_clone_base_to_cuda(LSTM_CUMAT_IG, igNet); } // Clone input network __lstm_clone_base_to_cuda(LSTM_CUMAT_INPUT, inputNet); } } // Assign value *lstmCudaPtr = tmpLstmCuda; goto RET; ERR: lstm_delete_cuda(tmpLstmCuda); RET: LOG("exit"); return ret; } int lstm_clone_from_cuda(lstm_t* lstmPtr, lstm_cuda_t lstmCuda) { int i, j; int vecLen, nodeCount; int ret = LSTM_NO_ERROR; lstm_t tmpLstm = NULL; struct LSTM_CULAYER* srcLayerRef; struct LSTM_LAYER* dstLayerRef; struct LSTM_CONFIG_STRUCT* cfgRef; LOG("enter"); // Get reference cfgRef = &lstmCuda->config; // Create lstm lstm_run(lstm_create(&tmpLstm, cfgRef), ret, RET); #define __lstm_clone_cuda_to_base(base, cuMat) \ { \ int procIndex = cuMat * (nodeCount * vecLen) + j * vecLen; \ if(cudaMemcpy(dstLayerRef[i].nodeList[j].base.weight, \ &srcLayerRef[i].baseMat.weight[procIndex], \ dstLayerRef[i - 1].nodeCount * sizeof(float), \ cudaMemcpyDeviceToHost) \ != cudaSuccess) \ { \ ret = LSTM_MEM_FAILED; \ goto ERR; \ } \ else \ { \ procIndex += dstLayerRef[i - 1].nodeCount; \ } \ \ if(i == 1) \ { \ if(cudaMemcpy(dstLayerRef[i].nodeList[j].base.rWeight, \ &srcLayerRef[i].baseMat.weight[procIndex], \ dstLayerRef[cfgRef->layers - 2].nodeCount * sizeof(float), \ cudaMemcpyDeviceToHost) \ != cudaSuccess) \ { \ ret = LSTM_MEM_FAILED; \ goto ERR; \ } \ else \ { \ procIndex += dstLayerRef[cfgRef->layers - 2].nodeCount; \ } \ } \ \ if(cudaMemcpy(&dstLayerRef[i].nodeList[j].base.th, \ &srcLayerRef[i].baseMat.weight[procIndex], \ sizeof(float), \ cudaMemcpyDeviceToHost) \ != cudaSuccess) \ { \ ret = LSTM_MEM_FAILED; \ goto ERR; \ } \ } // Clone weight srcLayerRef = lstmCuda->layerList; dstLayerRef = 
tmpLstm->layerList; for(i = 1; i < cfgRef->layers; i++) { // Get node count and vector length nodeCount = dstLayerRef[i].nodeCount; vecLen = srcLayerRef[i].vecLen; // Clone cuda node to host for(j = 0; j < nodeCount; j++) { // Clone gate network if(i < cfgRef->layers - 1) { __lstm_clone_cuda_to_base(ogNet, LSTM_CUMAT_OG); __lstm_clone_cuda_to_base(fgNet, LSTM_CUMAT_FG); __lstm_clone_cuda_to_base(igNet, LSTM_CUMAT_IG); } // Clone input network __lstm_clone_cuda_to_base(inputNet, LSTM_CUMAT_INPUT); } } // Assign value *lstmPtr = tmpLstm; goto RET; ERR: lstm_delete(tmpLstm); RET: LOG("exit"); return ret; }
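The two clone routines above pack every node's parameters into one flat weight matrix per layer. A minimal host-side sketch of that index arithmetic follows; the helper name and comments are illustrative and are not taken from lstm_private_cuda.h.

// Sketch only: mirrors the offset computed by the __lstm_clone_* macros above.
#include <cstddef>

static size_t packed_weight_offset(int cuMat,      // sub-network slab: LSTM_CUMAT_INPUT/_IG/_FG/_OG
                                   int nodeCount,  // nodes in the current layer
                                   int vecLen,     // packed row length per node
                                   int node)       // node index j
{
    return (size_t)cuMat * (size_t)nodeCount * (size_t)vecLen   // start of this sub-network's slab
         + (size_t)node  * (size_t)vecLen;                      // row for node j
}
// Within a row the copies happen in this order: previous-layer weights, then (for
// the first hidden layer only) the recurrent weights, then one threshold value,
// so vecLen == prevNodeCount [+ recurrentCount] + 1.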
275935f31693535aa5fd20c23e1aa923fbc492b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CUDA version of the ray tracer program. * Combined CPE458/570 Project * * Brian Gomberg (bgomberg) * Luke Larson (lplarson) * Susan Marano (smarano) */ #include <stdio.h> #include <stdlib.h> #include "types.h" #include <time.h> #define NUM_SHAPES 153 #define PIE 3.14159 #define X_MAX 1023 #define Y_MAX 1023 #define BLOCK_SIZE 8 #define LIGHT_X 1 #define LIGHT_Y 0 #define LIGHT_Z 0.5 #define LIGHT_C 1 #define SPHERE_GLOSS 5 #define SPHERE_RADIUS_SQRD .015 //#define TIMING __device__ double intercept_sphere(ray_t ray, sphere_t sphere); __device__ coord_t cross_prod(const coord_t a, const coord_t b); __device__ double dot_prod(const coord_t *a, const coord_t *b); __device__ void normalize(coord_t *a); __device__ float sqrt2(float); __device__ float Inv_sqrt2(float); coord_t cross_prod_host(coord_t a, coord_t b); coord_t normalize_host(coord_t a); // http://stackoverflow.com/questions/13245258/handle-error-not-found-error-in-cuda static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // in global memory: coord_t point, eye_t camera5, light_t light, sphere_t* spheres __device__ uchar4 DirectIllumination(coord_t point, light_t light, ray_t ray, sphere_t sphere, sphere_t *spheres); #ifdef TIMING __global__ void RayTracer(sphere_t *spheres, uchar4 *output_buffer, eye_t camera, light_t light, int *runtime, coord_t n, coord_t u, coord_t v) #else __global__ void RayTracer(sphere_t *spheres, uchar4 *output_buffer, eye_t camera, light_t light, coord_t n, coord_t u, coord_t v) #endif { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; coord_t s; #ifdef TIMING clock_t start_time = clock(); #endif // Bounds checking if (col > X_MAX || row > Y_MAX) return; //Find x and y values at the screen // Coords with respect to eye s.x = -0.5+(((double)col)/X_MAX); s.y = -0.5+(((double)row)/Y_MAX); s.z = 1; #ifdef TIMING if (row == 200 && col == 200) runtime[0] = (int)clock()-start_time; #endif // Convert from eye coordinate system to normal s.x = camera.eye.x + s.x*u.x + s.y*v.x + s.z*n.x; s.y = camera.eye.y + s.x*u.y + s.y*v.y + s.z*n.y; s.z = camera.eye.z + s.x*u.z + s.y*v.z + s.z*n.z; //Define ray ray_t curRay; curRay.dir.x = s.x - camera.eye.x; curRay.dir.y = s.y - camera.eye.y; curRay.dir.z = s.z - camera.eye.z; curRay.start = camera.eye; curRay.t = -1; #ifdef TIMING if (row == 200 && col == 200) runtime[1] = (int)clock()-start_time - runtime[0]; #endif float t; sphere_t sphere; //check which objects intersect with ray for(int o = 0; o < NUM_SHAPES; o++){ //TODO more shapes t = intercept_sphere(curRay, spheres[o]); if ((t > 0 )&&((t < curRay.t) || (curRay.t < 0))){ curRay.t = t; sphere = spheres[o]; } } #ifdef TIMING if (row == 200 && col == 200) runtime[2] = (int)clock()-start_time - runtime[1]; #endif // Put inside of DirectIllumination // Finds intersection from ray coord_t intercept; int idx = row*(X_MAX+1)+col; if (curRay.t > 0) { intercept.x = (curRay.start.x)+curRay.t*(curRay.dir.x); intercept.y = (curRay.start.y)+curRay.t*(curRay.dir.y); intercept.z = (curRay.start.z)+curRay.t*(curRay.dir.z); // Change intercept to t output_buffer[idx] = DirectIllumination(intercept, light, curRay, sphere, spheres); } else { output_buffer[idx].w = 0; output_buffer[idx].x = 0; 
output_buffer[idx].y = 0; output_buffer[idx].z = 0; } #ifdef TIMING if (row == 200 && col == 200) runtime[3] = (int)clock()-start_time - runtime[2]; #endif } eye_t camera; light_t light; sphere_t spheres[NUM_SHAPES]; coord_t n, v, u; extern "C" void init_cuda() { // Set up camera camera.eye.x = 0; camera.eye.y = 0; camera.eye.z = 0; camera.look.x = 0; camera.look.y = 0; camera.look.z = -1; camera.up.x = 0; camera.up.y = 1; camera.up.z = 0; // Set up light light.loc.x = 0; light.loc.y = 0; light.loc.z = 1; light.color.r = 1; light.color.g = 1; light.color.b = 1; // Set up sphere(s) srand(time(NULL)); int s = 0; /* double radius = 1; double center = 0; for(int i = 0; i < 10*PIE; i++){ spheres[s].center.x = center + radius * cos(((float)i)/5); spheres[s].center.y = center + radius * sin(((float)i)/5); spheres[s].center.z = 2; spheres[s].color.r = ((double)rand() / ((double)RAND_MAX + 1) ); spheres[s].color.g = ((double)rand() / ((double)RAND_MAX + 1) ); spheres[s].color.b = ((double)rand() / ((double)RAND_MAX + 1) ); spheres[s].spec = .5; spheres[s].name = s; s++; }*/ // trunck 20 spheres /* for(int i = 0; i < 20; i++){ spheres[s].center.x = 0; spheres[s].center.y = (double)i/20; spheres[s].center.z = 2; if(i%2){ spheres[s].color.r = .6; spheres[s].color.g = .3; spheres[s].color.b = .1; } else{ spheres[s].color.r = .5; spheres[s].color.g = .2; spheres[s].color.b = .1; } spheres[s].spec = 0; spheres[s].name = s; s++; } // leaves 62 spheres double radius; double center = 0; for(int i = 0; i < 20*PIE; i++){ radius = ((double)rand() / ((double)RAND_MAX + 1)/3); spheres[s].center.x = center + radius * cos(((float)i)/5); spheres[s].center.y = center + radius * sin(((float)i)/5); spheres[s].center.z = 1.75+((double)rand() / ((double)RAND_MAX + 1)/2); spheres[s].color.r = ((double)rand() / ((double)RAND_MAX + 1)/10); spheres[s].color.g = .7+ ((double)rand() / ((double)RAND_MAX + 1)/5); spheres[s].color.b = .2+ ((double)rand() / ((double)RAND_MAX + 1)/10); spheres[s].spec = .3; spheres[s].name = s; s++; } */ // lolly stick 20 spheres for(int i = 0; i < 20; i++){ spheres[s].center.x = 0; spheres[s].center.y = (double)i/10; spheres[s].center.z = 2; spheres[s].color.r = 1; spheres[s].color.g = 1; spheres[s].color.b = 1; spheres[s].spec = 0; spheres[s].name = s; s++; } // lolly lot spheres double radius = 0; double center = 0; for(int i = 0; i < 42.5*PIE; i++){ radius = radius + .1/55*PIE; spheres[s].center.x = center + radius * cos(((float)i)/5); spheres[s].center.y = center + radius * sin(((float)i)/5)-0.7; spheres[s].center.z = 2; spheres[s].color.r = ((double)rand() / ((double)RAND_MAX + 1)/1.3); spheres[s].color.g = 0*((double)rand() / ((double)RAND_MAX + 1)/10); spheres[s].color.b = ((double)rand() / ((double)RAND_MAX + 1)/1.3); spheres[s].spec = .3; spheres[s].name = s; s++; } //convert to proper plane n.x = camera.eye.x-camera.look.x; n.y = camera.eye.y-camera.look.y; n.z = camera.eye.z-camera.look.z; u = cross_prod_host(camera.up,n); v = cross_prod_host(n, u); u = normalize_host(u); v = normalize_host(v); n = normalize_host(n); } int copySpheres = 1, allocSpheres = 1; sphere_t *spheresd; extern "C" void run_cuda(uchar4 *dptr) { #ifdef TIMING int *runtime_d; HANDLE_ERROR(hipMalloc(&runtime_d, sizeof(int)*4)); #endif if (allocSpheres) { allocSpheres = 0; HANDLE_ERROR(hipMalloc(&spheresd, sizeof(sphere_t)*NUM_SHAPES)); } if (copySpheres) { copySpheres = 0; HANDLE_ERROR(hipMemcpy(spheresd, spheres, sizeof(sphere_t)*NUM_SHAPES, hipMemcpyHostToDevice)); } dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); dim3 
gridDim((X_MAX+1+(BLOCK_SIZE-1))/BLOCK_SIZE, (Y_MAX+1+(BLOCK_SIZE)/BLOCK_SIZE)); #ifdef TIMING hipLaunchKernelGGL(( RayTracer), dim3(gridDim), dim3(blockDim), 0, 0, spheresd, dptr, camera, light, runtime_d, n, u, v); #else hipLaunchKernelGGL(( RayTracer), dim3(gridDim), dim3(blockDim), 0, 0, spheresd, dptr, camera, light, n, u, v); #endif #ifdef TIMING int runtime[4]; hipMemcpy(&runtime, runtime_d, sizeof(int)*4, hipMemcpyDeviceToHost); hipFree(runtime_d); #endif #ifdef TIMING printf("%d %d %d %d\n", runtime[0], runtime[1], runtime[2], runtime[3]); #endif } __device__ double intercept_sphere(ray_t ray, sphere_t sphere) { double discrim; double t1; double t2; coord_t temp; //camera - center temp.x = ray.start.x - sphere.center.x; temp.y = ray.start.y - sphere.center.y; temp.z = ray.start.z - sphere.center.z; //find and check discriminant double raydir_temp_dot = dot_prod(&ray.dir,&temp); double raydir_raydir_dot = dot_prod(&ray.dir,&ray.dir); double temp_temp_dot = dot_prod(&temp,&temp); discrim=(raydir_temp_dot*raydir_temp_dot-(raydir_raydir_dot)*(temp_temp_dot-SPHERE_RADIUS_SQRD)); if (discrim >= 0) { discrim = sqrt2(discrim); t1 = (-raydir_temp_dot+discrim)/(raydir_raydir_dot); if (t1 < 0) return -1; t2 = (-raydir_temp_dot-discrim)/(raydir_raydir_dot); return (t1<=t2)?t1:t2; } return -1; } __device__ uchar4 DirectIllumination(coord_t point, light_t light, ray_t ray, sphere_t sphere, sphere_t *spheres){ coord_t surfNorm; coord_t lightNorm; coord_t viewNorm; coord_t reflectNorm; ray_t lightRay; double diffuse; double spec = 0; uchar4 color; //calculate light normal lightNorm.x = light.loc.x-point.x; lightNorm.y = light.loc.y-point.y; lightNorm.z = light.loc.z-point.z; //check for shadows int noHit = 1; lightRay.start = point; lightRay.dir = lightNorm; for(int o = 0; o < NUM_SHAPES; o++){ if (sphere.name != spheres[o].name && (intercept_sphere(lightRay, spheres[o]) >= 0)) { noHit = 0; break; } } double r, g, b; //calculate color r = sphere.color.r*.2; g = sphere.color.g*.2; b = sphere.color.b*.2; if (noHit) { //calculate surface normal surfNorm.x = point.x - sphere.center.x; surfNorm.y = point.y - sphere.center.y; surfNorm.z = point.z - sphere.center.z; normalize(&surfNorm); //calculate diffuse color diffuse = dot_prod(&surfNorm,&lightNorm); if (diffuse > 1) diffuse = 1; diffuse *= !(diffuse < 0); if(diffuse > 0) { r += light.color.r*(sphere.color.r*diffuse); g += light.color.g*(sphere.color.g*diffuse); b += light.color.b*(sphere.color.b*diffuse); //calculate viewing normal viewNorm.x = -ray.dir.x; viewNorm.y = -ray.dir.y; viewNorm.z = -ray.dir.z; normalize(&viewNorm); //calculate reflection ray normal reflectNorm.x = (2*surfNorm.x*diffuse)-lightNorm.x; reflectNorm.y = (2*surfNorm.y*diffuse)-lightNorm.y; reflectNorm.z = (2*surfNorm.z*diffuse)-lightNorm.z; normalize(&reflectNorm); //calculate specular color spec = pow(dot_prod(&viewNorm, &reflectNorm),SPHERE_GLOSS); if (spec > 1) { //calculate color r += light.color.r*sphere.spec; g += light.color.g*sphere.spec; b += light.color.b*sphere.spec; } else if (spec > 0) { //calculate color r += light.color.r*(sphere.spec*spec); g += light.color.g*(sphere.spec*spec); b += light.color.b*(sphere.spec*spec); } } } r = r>1?1:r; g = g>1?1:g; b = b>1?1:b; color.w = 0; color.x = r * 255; color.y = g * 255; color.z = b * 255; return color; } __device__ double dot_prod(const coord_t *a, const coord_t *b){ return a->x * b->x + a->y * b->y + a->z * b->z; } __device__ coord_t cross_prod(const coord_t a, const coord_t b){ coord_t c; c.x = a.y*b.z - a.z*b.y; c.y 
= a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } coord_t cross_prod_host(coord_t a, coord_t b){ coord_t c; c.x = a.y*b.z - a.z*b.y; c.y = a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } __device__ void normalize(coord_t *a){ float mag_inv = Inv_sqrt2(dot_prod(a, a)); a->x = (a->x)*mag_inv; a->y = (a->y)*mag_inv; a->z = (a->z)*mag_inv; } coord_t normalize_host(coord_t a){ double mag = sqrt((a.x)*(a.x)+(a.y)*(a.y)+(a.z)*(a.z)); a.x = (a.x)/mag; a.y = (a.y)/mag; a.z = (a.z)/mag; return a; } // Inv_sqrt2 code from: http://forums.overclockers.co.uk/showthread.php?p=8773984 __device__ float Inv_sqrt2(float x) { float xhalf = 0.5f*x; int i = *(int*)&x; // get bits for floating value i = 0x5f375a86- (i>>1); // gives initial guess y0 x = *(float*)&i; // convert bits back to float x = x*(1.5f-xhalf*x*x); // Newton step, repeating increases accuracy return x; } // sqrt2 code from: http://www.codeproject.com/Articles/69941/Best-Square-Root-Method-Algorithm-Function-Precisi #define SQRT_MAGIC_F 0x5f3759df __device__ float sqrt2(float x) { const float xhalf = 0.5f*x; union // get bits for floating value { float x; int i; } u; u.x = x; u.i = SQRT_MAGIC_F - (u.i >> 1); // gives initial guess y0 return x*u.x*(1.5f - xhalf*u.x*u.x);// Newton step, repeating increases accuracy }
275935f31693535aa5fd20c23e1aa923fbc492b9.cu
/* CUDA version of the ray tracer program. * Combined CPE458/570 Project * * Brian Gomberg (bgomberg) * Luke Larson (lplarson) * Susan Marano (smarano) */ #include <stdio.h> #include <stdlib.h> #include "types.h" #include <time.h> #define NUM_SHAPES 153 #define PIE 3.14159 #define X_MAX 1023 #define Y_MAX 1023 #define BLOCK_SIZE 8 #define LIGHT_X 1 #define LIGHT_Y 0 #define LIGHT_Z 0.5 #define LIGHT_C 1 #define SPHERE_GLOSS 5 #define SPHERE_RADIUS_SQRD .015 //#define TIMING __device__ double intercept_sphere(ray_t ray, sphere_t sphere); __device__ coord_t cross_prod(const coord_t a, const coord_t b); __device__ double dot_prod(const coord_t *a, const coord_t *b); __device__ void normalize(coord_t *a); __device__ float sqrt2(float); __device__ float Inv_sqrt2(float); coord_t cross_prod_host(coord_t a, coord_t b); coord_t normalize_host(coord_t a); // http://stackoverflow.com/questions/13245258/handle-error-not-found-error-in-cuda static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // in global memory: coord_t point, eye_t camera5, light_t light, sphere_t* spheres __device__ uchar4 DirectIllumination(coord_t point, light_t light, ray_t ray, sphere_t sphere, sphere_t *spheres); #ifdef TIMING __global__ void RayTracer(sphere_t *spheres, uchar4 *output_buffer, eye_t camera, light_t light, int *runtime, coord_t n, coord_t u, coord_t v) #else __global__ void RayTracer(sphere_t *spheres, uchar4 *output_buffer, eye_t camera, light_t light, coord_t n, coord_t u, coord_t v) #endif { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; coord_t s; #ifdef TIMING clock_t start_time = clock(); #endif // Bounds checking if (col > X_MAX || row > Y_MAX) return; //Find x and y values at the screen // Coords with respect to eye s.x = -0.5+(((double)col)/X_MAX); s.y = -0.5+(((double)row)/Y_MAX); s.z = 1; #ifdef TIMING if (row == 200 && col == 200) runtime[0] = (int)clock()-start_time; #endif // Convert from eye coordinate system to normal s.x = camera.eye.x + s.x*u.x + s.y*v.x + s.z*n.x; s.y = camera.eye.y + s.x*u.y + s.y*v.y + s.z*n.y; s.z = camera.eye.z + s.x*u.z + s.y*v.z + s.z*n.z; //Define ray ray_t curRay; curRay.dir.x = s.x - camera.eye.x; curRay.dir.y = s.y - camera.eye.y; curRay.dir.z = s.z - camera.eye.z; curRay.start = camera.eye; curRay.t = -1; #ifdef TIMING if (row == 200 && col == 200) runtime[1] = (int)clock()-start_time - runtime[0]; #endif float t; sphere_t sphere; //check which objects intersect with ray for(int o = 0; o < NUM_SHAPES; o++){ //TODO more shapes t = intercept_sphere(curRay, spheres[o]); if ((t > 0 )&&((t < curRay.t) || (curRay.t < 0))){ curRay.t = t; sphere = spheres[o]; } } #ifdef TIMING if (row == 200 && col == 200) runtime[2] = (int)clock()-start_time - runtime[1]; #endif // Put inside of DirectIllumination // Finds intersection from ray coord_t intercept; int idx = row*(X_MAX+1)+col; if (curRay.t > 0) { intercept.x = (curRay.start.x)+curRay.t*(curRay.dir.x); intercept.y = (curRay.start.y)+curRay.t*(curRay.dir.y); intercept.z = (curRay.start.z)+curRay.t*(curRay.dir.z); // Change intercept to t output_buffer[idx] = DirectIllumination(intercept, light, curRay, sphere, spheres); } else { output_buffer[idx].w = 0; output_buffer[idx].x = 0; output_buffer[idx].y = 0; output_buffer[idx].z = 0; } #ifdef TIMING if (row == 200 && 
col == 200) runtime[3] = (int)clock()-start_time - runtime[2]; #endif } eye_t camera; light_t light; sphere_t spheres[NUM_SHAPES]; coord_t n, v, u; extern "C" void init_cuda() { // Set up camera camera.eye.x = 0; camera.eye.y = 0; camera.eye.z = 0; camera.look.x = 0; camera.look.y = 0; camera.look.z = -1; camera.up.x = 0; camera.up.y = 1; camera.up.z = 0; // Set up light light.loc.x = 0; light.loc.y = 0; light.loc.z = 1; light.color.r = 1; light.color.g = 1; light.color.b = 1; // Set up sphere(s) srand(time(NULL)); int s = 0; /* double radius = 1; double center = 0; for(int i = 0; i < 10*PIE; i++){ spheres[s].center.x = center + radius * cos(((float)i)/5); spheres[s].center.y = center + radius * sin(((float)i)/5); spheres[s].center.z = 2; spheres[s].color.r = ((double)rand() / ((double)RAND_MAX + 1) ); spheres[s].color.g = ((double)rand() / ((double)RAND_MAX + 1) ); spheres[s].color.b = ((double)rand() / ((double)RAND_MAX + 1) ); spheres[s].spec = .5; spheres[s].name = s; s++; }*/ // trunck 20 spheres /* for(int i = 0; i < 20; i++){ spheres[s].center.x = 0; spheres[s].center.y = (double)i/20; spheres[s].center.z = 2; if(i%2){ spheres[s].color.r = .6; spheres[s].color.g = .3; spheres[s].color.b = .1; } else{ spheres[s].color.r = .5; spheres[s].color.g = .2; spheres[s].color.b = .1; } spheres[s].spec = 0; spheres[s].name = s; s++; } // leaves 62 spheres double radius; double center = 0; for(int i = 0; i < 20*PIE; i++){ radius = ((double)rand() / ((double)RAND_MAX + 1)/3); spheres[s].center.x = center + radius * cos(((float)i)/5); spheres[s].center.y = center + radius * sin(((float)i)/5); spheres[s].center.z = 1.75+((double)rand() / ((double)RAND_MAX + 1)/2); spheres[s].color.r = ((double)rand() / ((double)RAND_MAX + 1)/10); spheres[s].color.g = .7+ ((double)rand() / ((double)RAND_MAX + 1)/5); spheres[s].color.b = .2+ ((double)rand() / ((double)RAND_MAX + 1)/10); spheres[s].spec = .3; spheres[s].name = s; s++; } */ // lolly stick 20 spheres for(int i = 0; i < 20; i++){ spheres[s].center.x = 0; spheres[s].center.y = (double)i/10; spheres[s].center.z = 2; spheres[s].color.r = 1; spheres[s].color.g = 1; spheres[s].color.b = 1; spheres[s].spec = 0; spheres[s].name = s; s++; } // lolly lot spheres double radius = 0; double center = 0; for(int i = 0; i < 42.5*PIE; i++){ radius = radius + .1/55*PIE; spheres[s].center.x = center + radius * cos(((float)i)/5); spheres[s].center.y = center + radius * sin(((float)i)/5)-0.7; spheres[s].center.z = 2; spheres[s].color.r = ((double)rand() / ((double)RAND_MAX + 1)/1.3); spheres[s].color.g = 0*((double)rand() / ((double)RAND_MAX + 1)/10); spheres[s].color.b = ((double)rand() / ((double)RAND_MAX + 1)/1.3); spheres[s].spec = .3; spheres[s].name = s; s++; } //convert to proper plane n.x = camera.eye.x-camera.look.x; n.y = camera.eye.y-camera.look.y; n.z = camera.eye.z-camera.look.z; u = cross_prod_host(camera.up,n); v = cross_prod_host(n, u); u = normalize_host(u); v = normalize_host(v); n = normalize_host(n); } int copySpheres = 1, allocSpheres = 1; sphere_t *spheresd; extern "C" void run_cuda(uchar4 *dptr) { #ifdef TIMING int *runtime_d; HANDLE_ERROR(cudaMalloc(&runtime_d, sizeof(int)*4)); #endif if (allocSpheres) { allocSpheres = 0; HANDLE_ERROR(cudaMalloc(&spheresd, sizeof(sphere_t)*NUM_SHAPES)); } if (copySpheres) { copySpheres = 0; HANDLE_ERROR(cudaMemcpy(spheresd, spheres, sizeof(sphere_t)*NUM_SHAPES, cudaMemcpyHostToDevice)); } dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim((X_MAX+1+(BLOCK_SIZE-1))/BLOCK_SIZE, (Y_MAX+1+(BLOCK_SIZE)/BLOCK_SIZE)); 
#ifdef TIMING RayTracer<<<gridDim, blockDim>>>(spheresd, dptr, camera, light, runtime_d, n, u, v); #else RayTracer<<<gridDim, blockDim>>>(spheresd, dptr, camera, light, n, u, v); #endif #ifdef TIMING int runtime[4]; cudaMemcpy(&runtime, runtime_d, sizeof(int)*4, cudaMemcpyDeviceToHost); cudaFree(runtime_d); #endif #ifdef TIMING printf("%d %d %d %d\n", runtime[0], runtime[1], runtime[2], runtime[3]); #endif } __device__ double intercept_sphere(ray_t ray, sphere_t sphere) { double discrim; double t1; double t2; coord_t temp; //camera - center temp.x = ray.start.x - sphere.center.x; temp.y = ray.start.y - sphere.center.y; temp.z = ray.start.z - sphere.center.z; //find and check discriminant double raydir_temp_dot = dot_prod(&ray.dir,&temp); double raydir_raydir_dot = dot_prod(&ray.dir,&ray.dir); double temp_temp_dot = dot_prod(&temp,&temp); discrim=(raydir_temp_dot*raydir_temp_dot-(raydir_raydir_dot)*(temp_temp_dot-SPHERE_RADIUS_SQRD)); if (discrim >= 0) { discrim = sqrt2(discrim); t1 = (-raydir_temp_dot+discrim)/(raydir_raydir_dot); if (t1 < 0) return -1; t2 = (-raydir_temp_dot-discrim)/(raydir_raydir_dot); return (t1<=t2)?t1:t2; } return -1; } __device__ uchar4 DirectIllumination(coord_t point, light_t light, ray_t ray, sphere_t sphere, sphere_t *spheres){ coord_t surfNorm; coord_t lightNorm; coord_t viewNorm; coord_t reflectNorm; ray_t lightRay; double diffuse; double spec = 0; uchar4 color; //calculate light normal lightNorm.x = light.loc.x-point.x; lightNorm.y = light.loc.y-point.y; lightNorm.z = light.loc.z-point.z; //check for shadows int noHit = 1; lightRay.start = point; lightRay.dir = lightNorm; for(int o = 0; o < NUM_SHAPES; o++){ if (sphere.name != spheres[o].name && (intercept_sphere(lightRay, spheres[o]) >= 0)) { noHit = 0; break; } } double r, g, b; //calculate color r = sphere.color.r*.2; g = sphere.color.g*.2; b = sphere.color.b*.2; if (noHit) { //calculate surface normal surfNorm.x = point.x - sphere.center.x; surfNorm.y = point.y - sphere.center.y; surfNorm.z = point.z - sphere.center.z; normalize(&surfNorm); //calculate diffuse color diffuse = dot_prod(&surfNorm,&lightNorm); if (diffuse > 1) diffuse = 1; diffuse *= !(diffuse < 0); if(diffuse > 0) { r += light.color.r*(sphere.color.r*diffuse); g += light.color.g*(sphere.color.g*diffuse); b += light.color.b*(sphere.color.b*diffuse); //calculate viewing normal viewNorm.x = -ray.dir.x; viewNorm.y = -ray.dir.y; viewNorm.z = -ray.dir.z; normalize(&viewNorm); //calculate reflection ray normal reflectNorm.x = (2*surfNorm.x*diffuse)-lightNorm.x; reflectNorm.y = (2*surfNorm.y*diffuse)-lightNorm.y; reflectNorm.z = (2*surfNorm.z*diffuse)-lightNorm.z; normalize(&reflectNorm); //calculate specular color spec = pow(dot_prod(&viewNorm, &reflectNorm),SPHERE_GLOSS); if (spec > 1) { //calculate color r += light.color.r*sphere.spec; g += light.color.g*sphere.spec; b += light.color.b*sphere.spec; } else if (spec > 0) { //calculate color r += light.color.r*(sphere.spec*spec); g += light.color.g*(sphere.spec*spec); b += light.color.b*(sphere.spec*spec); } } } r = r>1?1:r; g = g>1?1:g; b = b>1?1:b; color.w = 0; color.x = r * 255; color.y = g * 255; color.z = b * 255; return color; } __device__ double dot_prod(const coord_t *a, const coord_t *b){ return a->x * b->x + a->y * b->y + a->z * b->z; } __device__ coord_t cross_prod(const coord_t a, const coord_t b){ coord_t c; c.x = a.y*b.z - a.z*b.y; c.y = a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } coord_t cross_prod_host(coord_t a, coord_t b){ coord_t c; c.x = a.y*b.z - a.z*b.y; c.y = 
a.z*b.x - a.x*b.z; c.z = a.x*b.y - a.y*b.x; return c; } __device__ void normalize(coord_t *a){ float mag_inv = Inv_sqrt2(dot_prod(a, a)); a->x = (a->x)*mag_inv; a->y = (a->y)*mag_inv; a->z = (a->z)*mag_inv; } coord_t normalize_host(coord_t a){ double mag = sqrt((a.x)*(a.x)+(a.y)*(a.y)+(a.z)*(a.z)); a.x = (a.x)/mag; a.y = (a.y)/mag; a.z = (a.z)/mag; return a; } // Inv_sqrt2 code from: http://forums.overclockers.co.uk/showthread.php?p=8773984 __device__ float Inv_sqrt2(float x) { float xhalf = 0.5f*x; int i = *(int*)&x; // get bits for floating value i = 0x5f375a86- (i>>1); // gives initial guess y0 x = *(float*)&i; // convert bits back to float x = x*(1.5f-xhalf*x*x); // Newton step, repeating increases accuracy return x; } // sqrt2 code from: http://www.codeproject.com/Articles/69941/Best-Square-Root-Method-Algorithm-Function-Precisi #define SQRT_MAGIC_F 0x5f3759df __device__ float sqrt2(float x) { const float xhalf = 0.5f*x; union // get bits for floating value { float x; int i; } u; u.x = x; u.i = SQRT_MAGIC_F - (u.i >> 1); // gives initial guess y0 return x*u.x*(1.5f - xhalf*u.x*u.x);// Newton step, repeating increases accuracy }
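One detail worth noting in both versions of run_cuda() above: the grid y dimension is written as (Y_MAX+1+(BLOCK_SIZE)/BLOCK_SIZE), which evaluates to 1025 rather than 128. The image still renders correctly because the kernel early-exits when row > Y_MAX, but many more block rows are launched than needed. A minimal sketch of the conventional round-up, assuming the same X_MAX, Y_MAX and BLOCK_SIZE constants:

// Sketch only, not taken from either file above.
static dim3 full_image_grid(int width, int height, int blockSize)
{
    return dim3((width  + blockSize - 1) / blockSize,    // (1023 + 1 + 7) / 8 = 128
                (height + blockSize - 1) / blockSize);   // (1023 + 1 + 7) / 8 = 128
}
// Usage: dim3 gridDim = full_image_grid(X_MAX + 1, Y_MAX + 1, BLOCK_SIZE);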
b55e32bdb61d423b77e43be211742153814554cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <iostream> #include <float.h> #include <vector> __device__ inline int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void dense_voxelization_idx_gpu_kernel(int batch_size, int input_point_num, float resolution_w, float resolution_l, float resolution_h, int output_w, int output_l, int output_h, const float* input_coors, const int* input_num_list, int* input_accu_list, int* count_buffer, int* output_idx) { const int output_voxel_size = output_w * output_l * output_h; int point_id = threadIdx.x + blockIdx.x * blockDim.x; if (point_id < input_point_num) { int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution_w); int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution_l); int center_grid_coor_z = (int)floor(input_coors[point_id*3 + 2] / resolution_h); int batch_id = get_batch_id(input_accu_list, batch_size, point_id); int voxel_idx = batch_id * output_voxel_size + center_grid_coor_x * output_l * output_h + center_grid_coor_y * output_h + center_grid_coor_z; int count = atomicAdd(&count_buffer[voxel_idx], 1); if (count < 1) { output_idx[voxel_idx] = point_id; } } } __global__ void dense_voxelization_features_gpu_kernel(int batch_size, int channels, int output_w, int output_l, int output_h, const float* input_features, float* output_features, int* count_buffer, int* output_idx) { int voxel_id = threadIdx.x + blockIdx.x * blockDim.x; if (voxel_id < batch_size * output_w * output_l * output_h) { int count = count_buffer[voxel_id]; // for (int c = 0; c < channels; c++) { // output_features[voxel_id * channels + c] = -1.; // } if (count > 0) { int point_id = output_idx[voxel_id]; for (int c = 0; c < channels; c++) { output_features[voxel_id * channels + c] = input_features[point_id * channels + c]; // output_features[voxel_id * channels + c] = 1.; } } } } __global__ void dense_voxelization_grad_gpu_kernel(int batch_size, int channels, int output_w, int output_l, int output_h, const float* output_features_grad, const int* output_idx, float* input_features_grad) { int voxel_id = threadIdx.x + blockIdx.x * blockDim.x; if (voxel_id < batch_size * output_w * output_l * output_h) { int point_id = output_idx[voxel_id]; if (point_id >= 0) { for (int c = 0; c < channels; c++) { input_features_grad[point_id * channels + c] = output_features_grad[voxel_id * channels + c]; } } } } void dense_voxelization_gpu_launcher(int batch_size, int input_point_num, int channels, std::vector<float> resolution, std::vector<int> output_size, const float* input_coors, const float* input_features, const int* input_num_list, int* input_accu_list, int* count_buffer, float* output_features, int* output_idx) { if (batch_size*input_point_num <=0) { printf("DenseVoxelizationOp ERROR: Invalid CUDA input dimensions: [%d, %d]\n", batch_size, input_point_num); return; } int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_idx_gpu_kernel, 0, input_point_num); gridSize = (input_point_num + 
blockSize - 1) / blockSize; hipLaunchKernelGGL(( dense_voxelization_idx_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, batch_size, input_point_num, resolution[0], resolution[1], resolution[2], output_size[0], output_size[1], output_size[2], input_coors, input_num_list, input_accu_list, count_buffer, output_idx); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_features_gpu_kernel, 0, batch_size * output_size[0] * output_size[1] * output_size[2]); gridSize = (batch_size * output_size[0] * output_size[1] * output_size[2] + blockSize - 1) / blockSize; hipLaunchKernelGGL(( dense_voxelization_features_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, batch_size, channels, output_size[0], output_size[1], output_size[2], input_features, output_features, count_buffer, output_idx); } void dense_voxelization_grad_gpu_launcher(int batch_size, int channels, std::vector<int> output_size, const float* output_features_grad, const int* output_idx, float* input_features_grad) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_grad_gpu_kernel, 0, batch_size * output_size[0] * output_size[1] * output_size[2]); gridSize = (batch_size * output_size[0] * output_size[1] * output_size[2] + blockSize - 1) / blockSize; hipLaunchKernelGGL(( dense_voxelization_grad_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, batch_size, channels, output_size[0], output_size[1], output_size[2], output_features_grad, output_idx, input_features_grad); }
b55e32bdb61d423b77e43be211742153814554cb.cu
/* Voxel sampling GPU implementation * Author Zhaoyu SU * All Rights Reserved. Sep., 2019. */ #include <stdio.h> #include <iostream> #include <float.h> #include <vector> __device__ inline int get_batch_id(int* accu_list, int batch_size, int id) { for (int b=0; b<batch_size-1; b++) { if (id >= accu_list[b]) { if(id < accu_list[b+1]) return b; } } return batch_size - 1; } __global__ void dense_voxelization_idx_gpu_kernel(int batch_size, int input_point_num, float resolution_w, float resolution_l, float resolution_h, int output_w, int output_l, int output_h, const float* input_coors, const int* input_num_list, int* input_accu_list, int* count_buffer, int* output_idx) { const int output_voxel_size = output_w * output_l * output_h; int point_id = threadIdx.x + blockIdx.x * blockDim.x; if (point_id < input_point_num) { int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution_w); int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution_l); int center_grid_coor_z = (int)floor(input_coors[point_id*3 + 2] / resolution_h); int batch_id = get_batch_id(input_accu_list, batch_size, point_id); int voxel_idx = batch_id * output_voxel_size + center_grid_coor_x * output_l * output_h + center_grid_coor_y * output_h + center_grid_coor_z; int count = atomicAdd(&count_buffer[voxel_idx], 1); if (count < 1) { output_idx[voxel_idx] = point_id; } } } __global__ void dense_voxelization_features_gpu_kernel(int batch_size, int channels, int output_w, int output_l, int output_h, const float* input_features, float* output_features, int* count_buffer, int* output_idx) { int voxel_id = threadIdx.x + blockIdx.x * blockDim.x; if (voxel_id < batch_size * output_w * output_l * output_h) { int count = count_buffer[voxel_id]; // for (int c = 0; c < channels; c++) { // output_features[voxel_id * channels + c] = -1.; // } if (count > 0) { int point_id = output_idx[voxel_id]; for (int c = 0; c < channels; c++) { output_features[voxel_id * channels + c] = input_features[point_id * channels + c]; // output_features[voxel_id * channels + c] = 1.; } } } } __global__ void dense_voxelization_grad_gpu_kernel(int batch_size, int channels, int output_w, int output_l, int output_h, const float* output_features_grad, const int* output_idx, float* input_features_grad) { int voxel_id = threadIdx.x + blockIdx.x * blockDim.x; if (voxel_id < batch_size * output_w * output_l * output_h) { int point_id = output_idx[voxel_id]; if (point_id >= 0) { for (int c = 0; c < channels; c++) { input_features_grad[point_id * channels + c] = output_features_grad[voxel_id * channels + c]; } } } } void dense_voxelization_gpu_launcher(int batch_size, int input_point_num, int channels, std::vector<float> resolution, std::vector<int> output_size, const float* input_coors, const float* input_features, const int* input_num_list, int* input_accu_list, int* count_buffer, float* output_features, int* output_idx) { if (batch_size*input_point_num <=0) { printf("DenseVoxelizationOp ERROR: Invalid CUDA input dimensions: [%d, %d]\n", batch_size, input_point_num); return; } int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_idx_gpu_kernel, 0, input_point_num); gridSize = (input_point_num + blockSize - 1) / blockSize; dense_voxelization_idx_gpu_kernel<<<gridSize, 
blockSize>>>(batch_size, input_point_num, resolution[0], resolution[1], resolution[2], output_size[0], output_size[1], output_size[2], input_coors, input_num_list, input_accu_list, count_buffer, output_idx); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_features_gpu_kernel, 0, batch_size * output_size[0] * output_size[1] * output_size[2]); gridSize = (batch_size * output_size[0] * output_size[1] * output_size[2] + blockSize - 1) / blockSize; dense_voxelization_features_gpu_kernel<<<gridSize, blockSize>>>(batch_size, channels, output_size[0], output_size[1], output_size[2], input_features, output_features, count_buffer, output_idx); } void dense_voxelization_grad_gpu_launcher(int batch_size, int channels, std::vector<int> output_size, const float* output_features_grad, const int* output_idx, float* input_features_grad) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dense_voxelization_grad_gpu_kernel, 0, batch_size * output_size[0] * output_size[1] * output_size[2]); gridSize = (batch_size * output_size[0] * output_size[1] * output_size[2] + blockSize - 1) / blockSize; dense_voxelization_grad_gpu_kernel<<<gridSize, blockSize>>>(batch_size, channels, output_size[0], output_size[1], output_size[2], output_features_grad, output_idx, input_features_grad); }
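For reference, the flat voxel index computed inside dense_voxelization_idx_gpu_kernel above can be written out as a small host-side helper; the function name is illustrative only.

// Sketch only: same arithmetic as the kernel's voxel_idx.
static int flat_voxel_index(int batch_id, int gx, int gy, int gz,
                            int output_w, int output_l, int output_h)
{
    int voxels_per_batch = output_w * output_l * output_h;
    return batch_id * voxels_per_batch      // batch slab
         + gx * output_l * output_h         // stride along W
         + gy * output_h                    // stride along L
         + gz;                              // position along H
}
// The kernel atomically increments count_buffer[voxel_idx] and records only the
// first point that falls into each voxel, so output_idx ends up holding one point
// id per occupied voxel, which the feature and gradient kernels then gather from.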
892fb5bc78ced9000314d4aa65be378f1399914b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Modified for CSCS by Javier Otero ([email protected]) to * support both HIP and CUDA. * * Modifications for CSCS by Mark Klein ([email protected]) * - NVML bindings * - Reduced output * * original gpu_burn * Copyright (c) 2016, Ville Timonen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those * of the authors and should not be interpreted as representing official policies, * either expressed or implied, of the FreeBSD Project. */ #define SIZE 2048ul // Matrices are SIZE*SIZE.. 2048^2 should be efficiently implemented in CUBLAS #define USEMEM 0.9 // Try to allocate 90% of memory // Used to report op/s, measured through Visual Profiler, CUBLAS from CUDA 7.5 // (Seems that they indeed take the naive dim^3 approach) #define OPS_PER_MUL 17188257792ul #include <iostream> #include <cstdlib> #include <thread> #include <condition_variable> #include <type_traits> #include <vector> #include <array> #include <mutex> #include <algorithm> #include <functional> #include <memory> #include "Xdevice/runtime.hpp" #include "Xdevice/smi.hpp" #include "Xdevice/blas.hpp" // Actually, there are no rounding errors due to results being accumulated in an arbitrary order. // Therefore EPSILON = 0.0f is OK #define EPSILON 0.001f #define EPSILOND 0.0000001 namespace kernels { template<class T> __global__ void compare(T *C, int *numberOfErrors, size_t iters) { size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y; size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y gridDim.x*blockDim.x + // W blockIdx.x*blockDim.x + threadIdx.x; // X int localErrors = 0; for (size_t i = 1; i < iters; ++i) if (fabs(C[myIndex] - C[myIndex + i*iterStep]) > EPSILOND) localErrors++; atomicAdd(numberOfErrors, localErrors); } } template <class T> class GemmTest { private: int deviceId; // SMI handle to do system queries. 
Smi * smi_handle; // Iterations per call to this->compute size_t iters; long long int totalErrors; // Work arrays T * d_C; T * d_A; T * d_B; int * d_numberOfErrors; XblasHandle_t d_blas; static const int g_blockSize = 16; public: GemmTest(int id, Smi * smi_hand) : deviceId(id), smi_handle(smi_hand) { // Set the device and pin thread to CPU with XSetDevice(deviceId); smi_handle->setCpuAffinity(deviceId); // Create blas plan XblasCreate(&d_blas); totalErrors = 0; } ~GemmTest() { XFree(d_C); XFree(d_A); XFree(d_B); XFree(d_numberOfErrors); XblasDestroy(d_blas); XDeviceSynchronize(); } unsigned long long int getErrors() { unsigned long long int tempErrs = totalErrors; totalErrors = 0; return tempErrs; } size_t getIters() { return iters; } size_t availMemory() { size_t freeMem; smi_handle->getDeviceAvailMemorySize(deviceId, &freeMem); return freeMem; } void initBuffers(T * h_A, T * h_B) { size_t useBytes = (size_t)((double)availMemory()*USEMEM); size_t d_resultSize = sizeof(T)*SIZE*SIZE; iters = (useBytes - 2*d_resultSize)/d_resultSize; // We remove A and B sizes XMalloc((void**)&d_C, iters*d_resultSize); XMalloc((void**)&d_A, d_resultSize); XMalloc((void**)&d_B, d_resultSize); XMalloc((void**)&d_numberOfErrors, sizeof(int)); // Populating matrices A and B XMemcpy(d_A, h_A, d_resultSize, XMemcpyHostToDevice); XMemcpy(d_B, h_B, d_resultSize, XMemcpyHostToDevice); } void compute() = delete; void compare() { int numberOfErrors; XMemset(d_numberOfErrors, 0, sizeof(int)); dim3 block(g_blockSize,g_blockSize); dim3 grid(SIZE/g_blockSize,SIZE/g_blockSize); hipLaunchKernelGGL(( kernels::compare<T>), dim3(grid),dim3(block), 0, 0, (T*)d_C,(int*)d_numberOfErrors,(size_t)iters); XMemcpy(&numberOfErrors, d_numberOfErrors, sizeof(int), XMemcpyDeviceToHost); if (numberOfErrors) { totalErrors += (long long int)numberOfErrors; printf("WE FOUND %d FAULTY ELEMENTS from GPU %d\n", numberOfErrors, deviceId); } } }; template<> void GemmTest<double>::compute() { static const double alpha = 1.0; static const double beta = 0.0; for (size_t i = 0; i < iters; ++i) { XblasDgemm(d_blas, XBLAS_OP_N, XBLAS_OP_N, SIZE, SIZE, SIZE, &alpha, (const double*)d_A, SIZE, (const double*)d_B, SIZE, &beta, d_C + i*SIZE*SIZE, SIZE); } } template<> void GemmTest<float>::compute() { static const float alpha = 1.0; static const float beta = 0.0; for (size_t i = 0; i < iters; ++i) { XblasSgemm(d_blas, XBLAS_OP_N, XBLAS_OP_N, SIZE, SIZE, SIZE, &alpha, (const float*)d_A, SIZE, (const float*)d_B, SIZE, &beta, d_C + i*SIZE*SIZE, SIZE); } } class BurnTracker { /* Timing class that keeps track of the progress made by a single thread * through the burn process. * * All the member functions are thread-safe. These could be accessed by the * master/slave thread at any time to read/write data. * * When the read function is called, the counters are reset. 
*/ public: std::mutex mtx; size_t iters, reps, err; std::chrono::system_clock::time_point start, end; BurnTracker() { std::lock_guard<std::mutex> lg(mtx); err = 0; iters = 0; reps = 0; }; void set_iters(size_t it) { std::lock_guard<std::mutex> lg(mtx); iters = it; } void start_timer() { std::lock_guard<std::mutex> lg(mtx); start = std::chrono::system_clock::now(); } void log(size_t e) { std::lock_guard<std::mutex> lg(mtx); end = std::chrono::system_clock::now(); reps++; err += e; } double read() { std::lock_guard<std::mutex> lg(mtx); // Failure checking if (err) return -1; // Get the time difference and return the flops std::chrono::duration<double> diff = end-start; double Gflops = 1e-9 * iters * reps * OPS_PER_MUL / diff.count(); // Reset the counters err = 0; reps = 0; start = end; return Gflops; } }; // Global vars for inter-thread communication. std::condition_variable cv; std::mutex cv_m; volatile bool burn = false; volatile int startUpCounter = 0; int devCount; template<class T> void startBurn(int devId, Smi * smi_handle, T *A, T *B, BurnTracker * bt ) { GemmTest<T> test(devId, smi_handle); test.initBuffers(A, B); // Log the number of iterations per compute call bt->set_iters(test.getIters()); // Warmup burn test.compute(); XDeviceSynchronize(); { // Flag that this thread is done with the warmup. std::lock_guard<std::mutex> lg(cv_m); ++startUpCounter; cv.notify_all(); } // Hold off any computation until all threads are go. { std::unique_lock<std::mutex> lk(cv_m); cv.wait(lk, []{return burn;}); } bt->start_timer(); // The actual work while (burn) { test.compute(); test.compare(); // Update the results bt->log(test.getErrors()); } } template<class T> void launch(int duration) { // Initializing A and B with random data T *A = (T*) malloc(sizeof(T)*SIZE*SIZE); T *B = (T*) malloc(sizeof(T)*SIZE*SIZE); srand(10); for (size_t i = 0; i < SIZE*SIZE; ++i) { A[i] = (T)((double)(rand()%1000000)/100000.0); B[i] = (T)((double)(rand()%1000000)/100000.0); } char hostname[256]; hostname[255]='\0'; gethostname(hostname,255); // Initialise the SMI Smi smi_handle; // Here burn is a switch that holds and breaks the work done by the slave threads. burn = false; XGetDeviceCount(&devCount); std::vector<std::thread> threads; // Print device count printf("[%s] Found %d device(s).\n", hostname, devCount); // All the burn info is stored in instances of the BurnTracker class. BurnTracker ** trackThreads = new BurnTracker*[devCount]; // Create one thread per device - burn is still off here. for (int i = 0; i < devCount; i++) { trackThreads[i] = new BurnTracker(); threads.push_back(std::thread(startBurn<T>, i, &smi_handle, A, B, trackThreads[i] ) ); } // Hold until all the threads are done with the init. { std::unique_lock<std::mutex> lk(cv_m); cv.wait(lk, []{return startUpCounter == devCount;}); } // Burn-time. burn = true; cv.notify_all(); std::this_thread::sleep_for( std::chrono::seconds(duration) ); // Burn-time done. burn = false; // Process output for (int i = 0; i < devCount; i++) { double flops = trackThreads[i]->read(); float devTemp; smi_handle.getGpuTemp(i, &devTemp); printf("[%s] GPU %2d(%s): %4.0f GF/s %d Celsius\n", hostname, i, flops < 0.0 ? 
"FAULTY" : "OK", flops, (int)devTemp); } // Join all threads std::for_each(threads.begin(), threads.end(), std::mem_fn(&std::thread::join)); // Cleanup free(A); free(B); for (int i = 0; i < devCount; i++) { delete trackThreads[i]; } delete [] trackThreads; } int main(int argc, char **argv) { /* * The time of the burn can be set by passing the time in seconds as an * as an executable argument. If this value is prepended with the `-d` option, * the matrix operations will be double-precesion. * * By default, the code will run for 10s in single-precision mode. */ int runLength = 10; bool useDoubles = false; int thisParam = 0; if (argc >= 2 && std::string(argv[1]) == "-d") { useDoubles = true; thisParam++; } if (argc-thisParam < 2) printf("Run length not specified in the command line. Burning for 10 secs\n"); else runLength = atoi(argv[1+thisParam]); if (useDoubles) { launch<double>(runLength); } else { launch<float>(runLength); } return 0; }
892fb5bc78ced9000314d4aa65be378f1399914b.cu
/* * Modified for CSCS by Javier Otero ([email protected]) to * support both HIP and CUDA. * * Modifications for CSCS by Mark Klein ([email protected]) * - NVML bindings * - Reduced output * * original gpu_burn * Copyright (c) 2016, Ville Timonen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those * of the authors and should not be interpreted as representing official policies, * either expressed or implied, of the FreeBSD Project. */ #define SIZE 2048ul // Matrices are SIZE*SIZE.. 2048^2 should be efficiently implemented in CUBLAS #define USEMEM 0.9 // Try to allocate 90% of memory // Used to report op/s, measured through Visual Profiler, CUBLAS from CUDA 7.5 // (Seems that they indeed take the naive dim^3 approach) #define OPS_PER_MUL 17188257792ul #include <iostream> #include <cstdlib> #include <thread> #include <condition_variable> #include <type_traits> #include <vector> #include <array> #include <mutex> #include <algorithm> #include <functional> #include <memory> #include "Xdevice/runtime.hpp" #include "Xdevice/smi.hpp" #include "Xdevice/blas.hpp" // Actually, there are no rounding errors due to results being accumulated in an arbitrary order. // Therefore EPSILON = 0.0f is OK #define EPSILON 0.001f #define EPSILOND 0.0000001 namespace kernels { template<class T> __global__ void compare(T *C, int *numberOfErrors, size_t iters) { size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y; size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y gridDim.x*blockDim.x + // W blockIdx.x*blockDim.x + threadIdx.x; // X int localErrors = 0; for (size_t i = 1; i < iters; ++i) if (fabs(C[myIndex] - C[myIndex + i*iterStep]) > EPSILOND) localErrors++; atomicAdd(numberOfErrors, localErrors); } } template <class T> class GemmTest { private: int deviceId; // SMI handle to do system queries. 
Smi * smi_handle; // Iterations per call to this->compute size_t iters; long long int totalErrors; // Work arrays T * d_C; T * d_A; T * d_B; int * d_numberOfErrors; XblasHandle_t d_blas; static const int g_blockSize = 16; public: GemmTest(int id, Smi * smi_hand) : deviceId(id), smi_handle(smi_hand) { // Set the device and pin thread to CPU with XSetDevice(deviceId); smi_handle->setCpuAffinity(deviceId); // Create blas plan XblasCreate(&d_blas); totalErrors = 0; } ~GemmTest() { XFree(d_C); XFree(d_A); XFree(d_B); XFree(d_numberOfErrors); XblasDestroy(d_blas); XDeviceSynchronize(); } unsigned long long int getErrors() { unsigned long long int tempErrs = totalErrors; totalErrors = 0; return tempErrs; } size_t getIters() { return iters; } size_t availMemory() { size_t freeMem; smi_handle->getDeviceAvailMemorySize(deviceId, &freeMem); return freeMem; } void initBuffers(T * h_A, T * h_B) { size_t useBytes = (size_t)((double)availMemory()*USEMEM); size_t d_resultSize = sizeof(T)*SIZE*SIZE; iters = (useBytes - 2*d_resultSize)/d_resultSize; // We remove A and B sizes XMalloc((void**)&d_C, iters*d_resultSize); XMalloc((void**)&d_A, d_resultSize); XMalloc((void**)&d_B, d_resultSize); XMalloc((void**)&d_numberOfErrors, sizeof(int)); // Populating matrices A and B XMemcpy(d_A, h_A, d_resultSize, XMemcpyHostToDevice); XMemcpy(d_B, h_B, d_resultSize, XMemcpyHostToDevice); } void compute() = delete; void compare() { int numberOfErrors; XMemset(d_numberOfErrors, 0, sizeof(int)); dim3 block(g_blockSize,g_blockSize); dim3 grid(SIZE/g_blockSize,SIZE/g_blockSize); kernels::compare<T><<<grid,block>>>((T*)d_C,(int*)d_numberOfErrors,(size_t)iters); XMemcpy(&numberOfErrors, d_numberOfErrors, sizeof(int), XMemcpyDeviceToHost); if (numberOfErrors) { totalErrors += (long long int)numberOfErrors; printf("WE FOUND %d FAULTY ELEMENTS from GPU %d\n", numberOfErrors, deviceId); } } }; template<> void GemmTest<double>::compute() { static const double alpha = 1.0; static const double beta = 0.0; for (size_t i = 0; i < iters; ++i) { XblasDgemm(d_blas, XBLAS_OP_N, XBLAS_OP_N, SIZE, SIZE, SIZE, &alpha, (const double*)d_A, SIZE, (const double*)d_B, SIZE, &beta, d_C + i*SIZE*SIZE, SIZE); } } template<> void GemmTest<float>::compute() { static const float alpha = 1.0; static const float beta = 0.0; for (size_t i = 0; i < iters; ++i) { XblasSgemm(d_blas, XBLAS_OP_N, XBLAS_OP_N, SIZE, SIZE, SIZE, &alpha, (const float*)d_A, SIZE, (const float*)d_B, SIZE, &beta, d_C + i*SIZE*SIZE, SIZE); } } class BurnTracker { /* Timing class that keeps track of the progress made by a single thread * through the burn process. * * All the member functions are thread-safe. These could be accessed by the * master/slave thread at any time to read/write data. * * When the read function is called, the counters are reset. 
*/ public: std::mutex mtx; size_t iters, reps, err; std::chrono::system_clock::time_point start, end; BurnTracker() { std::lock_guard<std::mutex> lg(mtx); err = 0; iters = 0; reps = 0; }; void set_iters(size_t it) { std::lock_guard<std::mutex> lg(mtx); iters = it; } void start_timer() { std::lock_guard<std::mutex> lg(mtx); start = std::chrono::system_clock::now(); } void log(size_t e) { std::lock_guard<std::mutex> lg(mtx); end = std::chrono::system_clock::now(); reps++; err += e; } double read() { std::lock_guard<std::mutex> lg(mtx); // Failure checking if (err) return -1; // Get the time difference and return the flops std::chrono::duration<double> diff = end-start; double Gflops = 1e-9 * iters * reps * OPS_PER_MUL / diff.count(); // Reset the counters err = 0; reps = 0; start = end; return Gflops; } }; // Global vars for inter-thread communication. std::condition_variable cv; std::mutex cv_m; volatile bool burn = false; volatile int startUpCounter = 0; int devCount; template<class T> void startBurn(int devId, Smi * smi_handle, T *A, T *B, BurnTracker * bt ) { GemmTest<T> test(devId, smi_handle); test.initBuffers(A, B); // Log the number of iterations per compute call bt->set_iters(test.getIters()); // Warmup burn test.compute(); XDeviceSynchronize(); { // Flag that this thread is done with the warmup. std::lock_guard<std::mutex> lg(cv_m); ++startUpCounter; cv.notify_all(); } // Hold off any computation until all threads are go. { std::unique_lock<std::mutex> lk(cv_m); cv.wait(lk, []{return burn;}); } bt->start_timer(); // The actual work while (burn) { test.compute(); test.compare(); // Update the results bt->log(test.getErrors()); } } template<class T> void launch(int duration) { // Initializing A and B with random data T *A = (T*) malloc(sizeof(T)*SIZE*SIZE); T *B = (T*) malloc(sizeof(T)*SIZE*SIZE); srand(10); for (size_t i = 0; i < SIZE*SIZE; ++i) { A[i] = (T)((double)(rand()%1000000)/100000.0); B[i] = (T)((double)(rand()%1000000)/100000.0); } char hostname[256]; hostname[255]='\0'; gethostname(hostname,255); // Initialise the SMI Smi smi_handle; // Here burn is a switch that holds and breaks the work done by the slave threads. burn = false; XGetDeviceCount(&devCount); std::vector<std::thread> threads; // Print device count printf("[%s] Found %d device(s).\n", hostname, devCount); // All the burn info is stored in instances of the BurnTracker class. BurnTracker ** trackThreads = new BurnTracker*[devCount]; // Create one thread per device - burn is still off here. for (int i = 0; i < devCount; i++) { trackThreads[i] = new BurnTracker(); threads.push_back(std::thread(startBurn<T>, i, &smi_handle, A, B, trackThreads[i] ) ); } // Hold until all the threads are done with the init. { std::unique_lock<std::mutex> lk(cv_m); cv.wait(lk, []{return startUpCounter == devCount;}); } // Burn-time. burn = true; cv.notify_all(); std::this_thread::sleep_for( std::chrono::seconds(duration) ); // Burn-time done. burn = false; // Process output for (int i = 0; i < devCount; i++) { double flops = trackThreads[i]->read(); float devTemp; smi_handle.getGpuTemp(i, &devTemp); printf("[%s] GPU %2d(%s): %4.0f GF/s %d Celsius\n", hostname, i, flops < 0.0 ? 
"FAULTY" : "OK", flops, (int)devTemp); } // Join all threads std::for_each(threads.begin(), threads.end(), std::mem_fn(&std::thread::join)); // Cleanup free(A); free(B); for (int i = 0; i < devCount; i++) { delete trackThreads[i]; } delete [] trackThreads; } int main(int argc, char **argv) { /* * The time of the burn can be set by passing the time in seconds as an * as an executable argument. If this value is prepended with the `-d` option, * the matrix operations will be double-precesion. * * By default, the code will run for 10s in single-precision mode. */ int runLength = 10; bool useDoubles = false; int thisParam = 0; if (argc >= 2 && std::string(argv[1]) == "-d") { useDoubles = true; thisParam++; } if (argc-thisParam < 2) printf("Run length not specified in the command line. Burning for 10 secs\n"); else runLength = atoi(argv[1+thisParam]); if (useDoubles) { launch<double>(runLength); } else { launch<float>(runLength); } return 0; }
7e01a2e49fdccf241e7841598a4ba0ffd1627cc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "cpu_bitmap.h" #include <cmath> static constexpr float INF = 2e10f; static constexpr unsigned SPHERES = 20u; static constexpr int DIM = 1024; struct Sphere { float r, g, b; float radius; float x, y, z; // given a ray shot from the pixel at (ox, oy), // hit() computes whether the ray intersects the sphere. // if the ray does intersect the sphere, return the distance // from the camera where the ray hits the sphere __device__ float hit(float ox, float oy, float *n) { float dx = ox - x; float dy = oy - y; if (dx * dx + dy * dy < radius * radius) { float dz = sqrtf( radius*radius - dx*dx - dy*dy ); *n = dz / sqrtf( radius * radius ); return dz + z; } return -INF; } }; static inline float rnd(float x) { return x * static_cast<float>(rand()) / static_cast<float>(RAND_MAX); } __global__ void kernel(unsigned char *ptr, Sphere *s) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float ox = float(x - DIM / 2); float oy = float(y - DIM / 2); float r = .0f, g = .0f, b = .0f; float maxz = -INF; for (unsigned i = 0; i < SPHERES; i++) { float n; float t = s[i].hit(ox, oy, &n); if (t > maxz) { float fscale = n; r = s[i].r * fscale; g = s[i].g * fscale; b = s[i].b * fscale; maxz = t; } } ptr[offset * 4 + 0] = int(r * 255.0f); ptr[offset * 4 + 1] = int(g * 255.0f); ptr[offset * 4 + 2] = int(b * 255.0f); ptr[offset * 4 + 3] = 255; } int main() { hipEvent_t start, stop; CHECK(hipEventCreate(&start)); CHECK(hipEventCreate(&stop)); CHECK(hipEventRecord(start, 0)); CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; CHECK(hipMalloc(&dev_bitmap, bitmap.image_size())); cuda::vector<Sphere> spheres(SPHERES); for (unsigned i = 0; i < SPHERES; i++) { Sphere *s = &spheres[i]; s->r = rnd(1.0f); s->g = rnd(1.0f); s->b = rnd(1.0f); s->x = rnd(1000.0f) - 500.0f; s->y = rnd(1000.0f) - 500.0f; s->z = rnd(1000.0f) - 500.0f; s->radius = rnd(100.0f) + 20.0f; } dim3 grids(DIM / 16, DIM / 16); dim3 threads(16, 16); hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, dev_bitmap, spheres.data()); CHECK(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost)); CHECK(hipEventRecord(stop, 0)); CHECK(hipEventSynchronize(stop)); float elapsed_time = .0f; CHECK(hipEventElapsedTime(&elapsed_time, start, stop)); printf("Time to generate: %3.1f ms\n", elapsed_time); CHECK(hipEventDestroy(start)); CHECK(hipEventDestroy(stop)); bitmap.display_and_exit(); CHECK(hipFree(dev_bitmap)); return 0; }
7e01a2e49fdccf241e7841598a4ba0ffd1627cc9.cu
#include "common.h" #include "cpu_bitmap.h" #include <cmath> static constexpr float INF = 2e10f; static constexpr unsigned SPHERES = 20u; static constexpr int DIM = 1024; struct Sphere { float r, g, b; float radius; float x, y, z; // given a ray shot from the pixel at (ox, oy), // hit() computes whether the ray intersects the sphere. // if the ray does intersect the sphere, return the distance // from the camera where the ray hits the sphere __device__ float hit(float ox, float oy, float *n) { float dx = ox - x; float dy = oy - y; if (dx * dx + dy * dy < radius * radius) { float dz = sqrtf( radius*radius - dx*dx - dy*dy ); *n = dz / sqrtf( radius * radius ); return dz + z; } return -INF; } }; static inline float rnd(float x) { return x * static_cast<float>(rand()) / static_cast<float>(RAND_MAX); } __global__ void kernel(unsigned char *ptr, Sphere *s) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float ox = float(x - DIM / 2); float oy = float(y - DIM / 2); float r = .0f, g = .0f, b = .0f; float maxz = -INF; for (unsigned i = 0; i < SPHERES; i++) { float n; float t = s[i].hit(ox, oy, &n); if (t > maxz) { float fscale = n; r = s[i].r * fscale; g = s[i].g * fscale; b = s[i].b * fscale; maxz = t; } } ptr[offset * 4 + 0] = int(r * 255.0f); ptr[offset * 4 + 1] = int(g * 255.0f); ptr[offset * 4 + 2] = int(b * 255.0f); ptr[offset * 4 + 3] = 255; } int main() { cudaEvent_t start, stop; CHECK(cudaEventCreate(&start)); CHECK(cudaEventCreate(&stop)); CHECK(cudaEventRecord(start, 0)); CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; CHECK(cudaMalloc(&dev_bitmap, bitmap.image_size())); cuda::vector<Sphere> spheres(SPHERES); for (unsigned i = 0; i < SPHERES; i++) { Sphere *s = &spheres[i]; s->r = rnd(1.0f); s->g = rnd(1.0f); s->b = rnd(1.0f); s->x = rnd(1000.0f) - 500.0f; s->y = rnd(1000.0f) - 500.0f; s->z = rnd(1000.0f) - 500.0f; s->radius = rnd(100.0f) + 20.0f; } dim3 grids(DIM / 16, DIM / 16); dim3 threads(16, 16); kernel<<<grids, threads>>>(dev_bitmap, spheres.data()); CHECK(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost)); CHECK(cudaEventRecord(stop, 0)); CHECK(cudaEventSynchronize(stop)); float elapsed_time = .0f; CHECK(cudaEventElapsedTime(&elapsed_time, start, stop)); printf("Time to generate: %3.1f ms\n", elapsed_time); CHECK(cudaEventDestroy(start)); CHECK(cudaEventDestroy(stop)); bitmap.display_and_exit(); CHECK(cudaFree(dev_bitmap)); return 0; }
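The listing above depends on common.h and cpu_bitmap.h, which are not included here; in particular the CHECK macro and the cuda::vector container come from those headers. The sketch below shows a typical definition for such an error-check macro, purely as an assumption about what the header provides, not its actual contents.

// Hypothetical stand-in for the CHECK macro supplied by common.h:
// abort with file/line information whenever a CUDA runtime call fails.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                      \
    do {                                                                 \
        cudaError_t err_ = (call);                                       \
        if (err_ != cudaSuccess) {                                       \
            std::fprintf(stderr, "CUDA error '%s' at %s:%d\n",           \
                         cudaGetErrorString(err_), __FILE__, __LINE__);  \
            std::exit(EXIT_FAILURE);                                     \
        }                                                                \
    } while (0)

// usage: CHECK(cudaMalloc(&dev_bitmap, bitmap.image_size()));

Wrapping every runtime call this way turns a silent failure into an immediate abort with a readable message, which is why the program above checks even the event and free calls.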
f69b3ab46dcba4483b65e3d7f90033580bf0bbd0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void VecAdd(float *color, unsigned int atoms)
{
	int j = threadIdx.x + blockDim.x * blockIdx.x;
	if(j < atoms) // if the index is within the atom count (104014)
	{
		if(color[j] < 0.45) // if the color value is below 0.45 (45%)
		{
			color[j] = .000001; // then the glass is all shattered
		}
	}
}
f69b3ab46dcba4483b65e3d7f90033580bf0bbd0.cu
#include <stdio.h>
#include <stdlib.h>

__global__ void VecAdd(float *color, unsigned int atoms)
{
	int j = threadIdx.x + blockDim.x * blockIdx.x;
	if(j < atoms) // if the index is within the atom count (104014)
	{
		if(color[j] < 0.45) // if the color value is below 0.45 (45%)
		{
			color[j] = .000001; // then the glass is all shattered
		}
	}
}
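Only the kernel is present in this pair; no host driver is shown. Below is a minimal sketch of how it might be launched, assuming one thread per atom and the 104014 count mentioned in the kernel comment; the buffer names (h_color, d_color) are made up for the example, and the snippet is meant to sit in the same translation unit as the kernel above.

#include <cstdio>
#include <cuda_runtime.h>

// forward declaration matching the kernel defined above
__global__ void VecAdd(float *color, unsigned int atoms);

int main() {
    const unsigned int atoms = 104014;            // count referenced in the kernel comment
    float *h_color = new float[atoms];
    for (unsigned int i = 0; i < atoms; ++i) h_color[i] = 0.5f;

    float *d_color = nullptr;
    cudaMalloc(&d_color, atoms * sizeof(float));
    cudaMemcpy(d_color, h_color, atoms * sizeof(float), cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (atoms + block - 1) / block; // one thread per atom, rounded up
    VecAdd<<<grid, block>>>(d_color, atoms);
    cudaMemcpy(h_color, d_color, atoms * sizeof(float), cudaMemcpyDeviceToHost);

    std::printf("color[0] = %f\n", h_color[0]);
    cudaFree(d_color);
    delete[] h_color;
    return 0;
}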
40d5ef3741975980b92bf4574069a3512c282912.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void create_combined_escape_carry_newline_count_index(char *file, long n, char *escape_carry_index, int *newline_count_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; long normal_chars_per_thread = max((n+stride-1) / stride, 64L); long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64; long start = index * chars_per_thread; long end = start + chars_per_thread; // There are essentially two cases: // - The last character in the previous block is an escape character. // - The last character in the previous block is not an escape character. // However, we don't know in advance which one it is, because // we are not sequential. So, here we'll basically // calculate the carry of each thread assuming the initial // carry is 0. char carry = 0; int count = 0; for (long i = start; i < end && i < n; i += 1) { char value = file[i]; if (value == '\\') { carry = 1 ^ carry; } else { carry = 0; } if (value == '\n') { count += 1; } } escape_carry_index[index] = carry; newline_count_index[index] = count; }
40d5ef3741975980b92bf4574069a3512c282912.cu
__global__ void create_combined_escape_carry_newline_count_index(char *file, long n, char *escape_carry_index, int *newline_count_index) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; long normal_chars_per_thread = max((n+stride-1) / stride, 64L); long chars_per_thread = ((normal_chars_per_thread + 64 - 1) / 64) * 64; long start = index * chars_per_thread; long end = start + chars_per_thread; // There are essentially two cases: // - The last character in the previous block is an escape character. // - The last character in the previous block is not an escape character. // However, we don't know in advance which one it is, because // we are not sequential. So, here we'll basically // calculate the carry of each thread assuming the initial // carry is 0. char carry = 0; int count = 0; for (long i = start; i < end && i < n; i += 1) { char value = file[i]; if (value == '\\') { carry = 1 ^ carry; } else { carry = 0; } if (value == '\n') { count += 1; } } escape_carry_index[index] = carry; newline_count_index[index] = count; }
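The per-thread newline counts written to newline_count_index are only useful once they are converted into starting offsets, typically with an exclusive prefix sum, so that a later pass knows where each thread's newline positions begin. That consuming code is not part of this pair, so the Thrust sketch below is an assumption about the follow-up step, using made-up counts.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
    // per-thread newline counts as the kernel above might produce them (made-up values)
    int h_counts[] = {3, 0, 5, 2, 1};
    const int n = sizeof(h_counts) / sizeof(h_counts[0]);

    thrust::device_vector<int> counts(h_counts, h_counts + n);
    thrust::device_vector<int> offsets(n);

    // offsets[i] = number of newlines found by all threads before thread i
    thrust::exclusive_scan(counts.begin(), counts.end(), offsets.begin());

    for (int i = 0; i < n; ++i)
        std::printf("thread %d writes its newlines starting at slot %d\n",
                    i, (int)offsets[i]);
    return 0;
}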
849f97d85fcbb1c085fa3c0d36ae9dcce284f7a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <rocblas.h> #include <time.h> #include <iostream> #define size 1024 __global__ void matrixMul(int *a, int *b, int *c){ int my_x, my_y; my_x = blockIdx.x*blockDim.x + threadIdx.x; my_y = blockIdx.y*blockDim.y + threadIdx.y; int local_c = 0; for(int i = 0 ; i < size; i++) local_c += a[my_x * size + i] * b[i * size + my_y]; c[my_x * size + my_y ] = local_c; } int main(int argc, char const *argv[]) { int n = 1024; struct timespec start, stop; double time; int* a = (int*)malloc(sizeof(int)*n*n); int* b = (int*)malloc(sizeof(int)*n*n); int* c = (int*)malloc(sizeof(int)*n*n); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { a[i*n + j] = 1; b[i*n + j] = 2; c[i*n + j] = 0; } } int *gpu_a, *gpu_b, *gpu_c; hipMalloc((void**)&gpu_a, sizeof(int)*n*n); hipMalloc((void**)&gpu_b, sizeof(int)*n*n); hipMalloc((void**)&gpu_c, sizeof(int)*n*n); hipMemcpy(gpu_a, a, sizeof(int)*n*n, hipMemcpyHostToDevice); hipMemcpy(gpu_b, b, sizeof(int)*n*n, hipMemcpyHostToDevice); dim3 dimGrid(64, 64); dim3 dimBlock(16, 16); if(clock_gettime(CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); } hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_a, gpu_b, gpu_c); hipMemcpy(c, gpu_c, sizeof(int)*n*n, hipMemcpyDeviceToHost); if(clock_gettime(CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" ); } time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9; printf("time is %f ns\n", time*1e9); std::cout << c[451*n + 451] << std::endl; free(a); free(b); free(c); hipFree(gpu_a); hipFree(gpu_b); hipFree(gpu_c); return 0; }
849f97d85fcbb1c085fa3c0d36ae9dcce284f7a5.cu
#include <stdlib.h> #include <stdio.h> #include <cublas.h> #include <time.h> #include <iostream> #define size 1024 __global__ void matrixMul(int *a, int *b, int *c){ int my_x, my_y; my_x = blockIdx.x*blockDim.x + threadIdx.x; my_y = blockIdx.y*blockDim.y + threadIdx.y; int local_c = 0; for(int i = 0 ; i < size; i++) local_c += a[my_x * size + i] * b[i * size + my_y]; c[my_x * size + my_y ] = local_c; } int main(int argc, char const *argv[]) { int n = 1024; struct timespec start, stop; double time; int* a = (int*)malloc(sizeof(int)*n*n); int* b = (int*)malloc(sizeof(int)*n*n); int* c = (int*)malloc(sizeof(int)*n*n); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { a[i*n + j] = 1; b[i*n + j] = 2; c[i*n + j] = 0; } } int *gpu_a, *gpu_b, *gpu_c; cudaMalloc((void**)&gpu_a, sizeof(int)*n*n); cudaMalloc((void**)&gpu_b, sizeof(int)*n*n); cudaMalloc((void**)&gpu_c, sizeof(int)*n*n); cudaMemcpy(gpu_a, a, sizeof(int)*n*n, cudaMemcpyHostToDevice); cudaMemcpy(gpu_b, b, sizeof(int)*n*n, cudaMemcpyHostToDevice); dim3 dimGrid(64, 64); dim3 dimBlock(16, 16); if(clock_gettime(CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); } matrixMul<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c); cudaMemcpy(c, gpu_c, sizeof(int)*n*n, cudaMemcpyDeviceToHost); if(clock_gettime(CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" ); } time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9; printf("time is %f ns\n", time*1e9); std::cout << c[451*n + 451] << std::endl; free(a); free(b); free(c); cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c); return 0; }
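Because A is filled with 1s and B with 2s, every element of the product should equal 2*n (2048 for n = 1024), which makes a full check of the result cheap; the program above only prints c[451*n + 451]. The verification helper below is hypothetical and would be called right after the device-to-host copy of c.

#include <stdio.h>

// Host-side check for the all-ones / all-twos inputs used above:
// every element of C should be 2 * n.
int verify(const int *c, int n) {
    const int expected = 2 * n;                  // 2048 for n = 1024
    for (long i = 0; i < (long)n * n; ++i) {
        if (c[i] != expected) {
            printf("mismatch at %ld: got %d, expected %d\n", i, c[i], expected);
            return 0;
        }
    }
    return 1;
}

// usage: if (!verify(c, n)) printf("result is wrong\n");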
0f75689c5d93983c99ad6cb0174683b1c3446c13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/flip/flip.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./flip.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { static const int BX = 16; static const int BY = 16; namespace { #define rep(i, n) for (size_t i = 0; i < (n); ++i) template <typename T, bool vertical, bool horizontal, size_t IC> __global__ void flip_kern(const T *src, T *dst, size_t N, size_t H, size_t W, size_t stride1, size_t stride2, size_t stride3) { __shared__ T cache[BX][BY][IC]; int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; if (ow < W && oh < H) { int iw = horizontal ? W - ow - 1 : ow; int ih = vertical ? H - oh - 1 : oh; #pragma unroll rep(c, IC) { cache[threadIdx.y][threadIdx.x][c] = src[blockIdx.z * stride1 + ih * stride2 + iw * stride3 + c]; } __syncthreads(); #pragma unroll rep(c, IC) { dst[blockIdx.z * stride1 + oh * stride2 + ow * stride3 + c] = cache[threadIdx.y][threadIdx.x][c]; } } } #undef rep } // anonymous namespace namespace flip { template <typename T, bool vertical, bool horizontal> void flip(const T *src, T *dst, size_t N, size_t H, size_t W, size_t IC, size_t stride1, size_t stride2, size_t stride3, hipStream_t stream) { dim3 threads(BX, BY); dim3 blocks(DIVUP(W, BX), DIVUP(H, BY), N); megdnn_assert(IC == 1 || IC == 3); if (IC == 1) hipLaunchKernelGGL(( flip_kern<T, vertical, horizontal, 1>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, H, W, stride1, stride2, stride3); else hipLaunchKernelGGL(( flip_kern<T, vertical, horizontal, 3>), dim3(blocks), dim3(threads), 0, stream, src, dst, N, H, W, stride1, stride2, stride3); after_kernel_launch(); } #define INST(T, vertical, horizontal) \ template void flip<T, vertical, horizontal>( \ const T *src, T *dst, size_t N, size_t H, size_t W, size_t IC, \ size_t stride1, size_t stride2, size_t stride3, hipStream_t); #define cb(DType) \ INST(typename DTypeTrait<DType>::ctype, true, true) \ INST(typename DTypeTrait<DType>::ctype, true, false) \ INST(typename DTypeTrait<DType>::ctype, false, true) \ INST(typename DTypeTrait<DType>::ctype, false, false) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace flip } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
0f75689c5d93983c99ad6cb0174683b1c3446c13.cu
/** * \file dnn/src/cuda/flip/flip.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./flip.cuh" #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { static const int BX = 16; static const int BY = 16; namespace { #define rep(i, n) for (size_t i = 0; i < (n); ++i) template <typename T, bool vertical, bool horizontal, size_t IC> __global__ void flip_kern(const T *src, T *dst, size_t N, size_t H, size_t W, size_t stride1, size_t stride2, size_t stride3) { __shared__ T cache[BX][BY][IC]; int ow = blockIdx.x * blockDim.x + threadIdx.x; int oh = blockIdx.y * blockDim.y + threadIdx.y; if (ow < W && oh < H) { int iw = horizontal ? W - ow - 1 : ow; int ih = vertical ? H - oh - 1 : oh; #pragma unroll rep(c, IC) { cache[threadIdx.y][threadIdx.x][c] = src[blockIdx.z * stride1 + ih * stride2 + iw * stride3 + c]; } __syncthreads(); #pragma unroll rep(c, IC) { dst[blockIdx.z * stride1 + oh * stride2 + ow * stride3 + c] = cache[threadIdx.y][threadIdx.x][c]; } } } #undef rep } // anonymous namespace namespace flip { template <typename T, bool vertical, bool horizontal> void flip(const T *src, T *dst, size_t N, size_t H, size_t W, size_t IC, size_t stride1, size_t stride2, size_t stride3, cudaStream_t stream) { dim3 threads(BX, BY); dim3 blocks(DIVUP(W, BX), DIVUP(H, BY), N); megdnn_assert(IC == 1 || IC == 3); if (IC == 1) flip_kern<T, vertical, horizontal, 1><<<blocks, threads, 0, stream>>>( src, dst, N, H, W, stride1, stride2, stride3); else flip_kern<T, vertical, horizontal, 3><<<blocks, threads, 0, stream>>>( src, dst, N, H, W, stride1, stride2, stride3); after_kernel_launch(); } #define INST(T, vertical, horizontal) \ template void flip<T, vertical, horizontal>( \ const T *src, T *dst, size_t N, size_t H, size_t W, size_t IC, \ size_t stride1, size_t stride2, size_t stride3, cudaStream_t); #define cb(DType) \ INST(typename DTypeTrait<DType>::ctype, true, true) \ INST(typename DTypeTrait<DType>::ctype, true, false) \ INST(typename DTypeTrait<DType>::ctype, false, true) \ INST(typename DTypeTrait<DType>::ctype, false, false) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace flip } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
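The flip<>() entry point takes raw strides rather than a tensor descriptor. For a contiguous N x H x W x IC (NHWC) buffer the strides follow directly from the shape, as in the sketch below; the call site is illustrative only and assumes the declaration from ./flip.cuh plus the explicit float instantiation suggested by the INST/cb macros above.

#include <cuda_runtime.h>
#include "./flip.cuh"   // assumed to declare megdnn::cuda::flip::flip<>()

int main() {
    const size_t N = 8, H = 224, W = 224, IC = 3;
    const size_t elems = N * H * W * IC;

    float *d_src = nullptr, *d_dst = nullptr;
    cudaMalloc(&d_src, elems * sizeof(float));
    cudaMalloc(&d_dst, elems * sizeof(float));

    // contiguous NHWC layout: the innermost stride is the channel count
    const size_t stride3 = IC;          // pixel to pixel within a row
    const size_t stride2 = W * IC;      // row to row
    const size_t stride1 = H * W * IC;  // image to image within the batch

    // vertical flip only, float data, default stream
    megdnn::cuda::flip::flip<float, true, false>(
            d_src, d_dst, N, H, W, IC, stride1, stride2, stride3, 0);

    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}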
05763a1bb0caad338feb95b03372df2f529140dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <stdlib.h> #include <iostream> #include "lodepng.h" using namespace std; __global__ void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int new_pos; if((y < n) && (x < m)) { new_pos = (y*m+x)*4; unsigned char r = d_Pin[new_pos]; unsigned char g = d_Pin[new_pos+1]; unsigned char b = d_Pin[new_pos+2]; d_Pout[new_pos] = 0.21f*r + 0.71f*g + 0.07f*b; d_Pout[new_pos+1] = d_Pout[new_pos]; d_Pout[new_pos+2] = d_Pout[new_pos]; d_Pout[new_pos+3] = d_Pin[new_pos+3]; } } __global__ void PictureKernel1D(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int x = blockIdx.x * blockDim.x + threadIdx.x; x = x*4; if(x < n*m*4) { unsigned char r = d_Pin[x]; unsigned char g = d_Pin[x+1]; unsigned char b = d_Pin[x+2]; d_Pout[x] = 0.21f*r + 0.71f*g + 0.07f*b; d_Pout[x+1] = d_Pout[x]; d_Pout[x+2] = d_Pout[x]; d_Pout[x+3] = d_Pin[x+3]; } } void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){ unsigned char* d_Pout, *d_Pin; long int size = n*m*4; hipMalloc((void **) &d_Pin,size); hipMemcpy(d_Pin, Pin, size, hipMemcpyHostToDevice); hipMalloc((void **) &d_Pout,size); dim3 gridDim((m-1)/8+1,(n-1)/16+1,1); dim3 blockDim(8,16,1); hipLaunchKernelGGL(( PictureKernell), dim3(gridDim),dim3(blockDim), 0, 0, d_Pin,d_Pout,n,m); //PictureKernel1D<<<(size-1)/256+1,256>>>(d_Pin,d_Pout,n,m); hipMemcpy(Pout, d_Pout, size, hipMemcpyDeviceToHost); hipFree(d_Pin); hipFree(d_Pout); } int main(int argc, char * argv[] ){ unsigned char *image, *out_image; int i; char name_in[100], name_out[100]; unsigned width, height; if(argv[1] == NULL or argv[2] == NULL) cout << "Usage\n inverse.cu [input image] [output image]\n"; strcpy(name_in,argv[1]); strcpy(name_out,argv[2]); i = lodepng_decode32_file(&image, &width, &height, name_in); if(i < 0) printf("NO\n"); out_image = (unsigned char*) malloc(width*height*4); Picture(image,out_image,height,width); lodepng_encode32_file(name_out,out_image,width,height); free(image); free(out_image); return 0; }
05763a1bb0caad338feb95b03372df2f529140dc.cu
#include <stdio.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <stdlib.h> #include <iostream> #include "lodepng.h" using namespace std; __global__ void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int new_pos; if((y < n) && (x < m)) { new_pos = (y*m+x)*4; unsigned char r = d_Pin[new_pos]; unsigned char g = d_Pin[new_pos+1]; unsigned char b = d_Pin[new_pos+2]; d_Pout[new_pos] = 0.21f*r + 0.71f*g + 0.07f*b; d_Pout[new_pos+1] = d_Pout[new_pos]; d_Pout[new_pos+2] = d_Pout[new_pos]; d_Pout[new_pos+3] = d_Pin[new_pos+3]; } } __global__ void PictureKernel1D(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int x = blockIdx.x * blockDim.x + threadIdx.x; x = x*4; if(x < n*m*4) { unsigned char r = d_Pin[x]; unsigned char g = d_Pin[x+1]; unsigned char b = d_Pin[x+2]; d_Pout[x] = 0.21f*r + 0.71f*g + 0.07f*b; d_Pout[x+1] = d_Pout[x]; d_Pout[x+2] = d_Pout[x]; d_Pout[x+3] = d_Pin[x+3]; } } void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){ unsigned char* d_Pout, *d_Pin; long int size = n*m*4; cudaMalloc((void **) &d_Pin,size); cudaMemcpy(d_Pin, Pin, size, cudaMemcpyHostToDevice); cudaMalloc((void **) &d_Pout,size); dim3 gridDim((m-1)/8+1,(n-1)/16+1,1); dim3 blockDim(8,16,1); PictureKernell<<<gridDim,blockDim>>>(d_Pin,d_Pout,n,m); //PictureKernel1D<<<(size-1)/256+1,256>>>(d_Pin,d_Pout,n,m); cudaMemcpy(Pout, d_Pout, size, cudaMemcpyDeviceToHost); cudaFree(d_Pin); cudaFree(d_Pout); } int main(int argc, char * argv[] ){ unsigned char *image, *out_image; int i; char name_in[100], name_out[100]; unsigned width, height; if(argv[1] == NULL or argv[2] == NULL) cout << "Usage\n inverse.cu [input image] [output image]\n"; strcpy(name_in,argv[1]); strcpy(name_out,argv[2]); i = lodepng_decode32_file(&image, &width, &height, name_in); if(i < 0) printf("NO\n"); out_image = (unsigned char*) malloc(width*height*4); Picture(image,out_image,height,width); lodepng_encode32_file(name_out,out_image,width,height); free(image); free(out_image); return 0; }
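The kernels above use the luminance weights 0.21, 0.71 and 0.07; applying the same weights on the CPU gives a quick way to spot-check the GPU output. The reference pass below is a hypothetical helper, not part of the original program.

#include <cstddef>

// CPU reference using the same weights as PictureKernell: convert an
// RGBA8 buffer to grayscale in place, leaving the alpha channel alone.
void grayscale_reference(unsigned char *px, unsigned width, unsigned height) {
    for (size_t i = 0; i < (size_t)width * height * 4; i += 4) {
        unsigned char g = (unsigned char)(0.21f * px[i] +
                                          0.71f * px[i + 1] +
                                          0.07f * px[i + 2]);
        px[i] = px[i + 1] = px[i + 2] = g;   // px[i + 3] (alpha) is untouched
    }
}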
7d78676512b3b49376431ea5008fabbda58f2b19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* MULTI-NODE AND PARALLEL MATRIX-MATRIX PRODUCT WITH MPI AND CUDA */ /* */ /* Description: This program performs a matrix product (A * B = C) */ /* distributing the computation between multiple nodes */ /* with MPI technology and parallelizing the computation in */ /* every node with Nvidia CUDA technology */ /* Compilation: nvcc -I/opt/mpi/bullxmpi/1.2.9.1/include */ /* -L/opt/mpi/bullxmpi/1.2.9.1/lib -lmpi -ldl -lm -lnuma */ /* -lrt -lnsl -lutil -lm -ldl mmpmpicuda.cu -o mmpmpicuda */ /* Strategy: */ /* Example 16x16 matrices with 4 nodes: */ /* _________________16________________ */ /* | | */ /* | NODE 1 | 4 */ /* |_________________________________| */ /* | | */ /* | NODE 2 | 4 */ /* C = |_________________________________| 16 */ /* | | */ /* | NODE 3 | 4 */ /* |_________________________________| */ /* | | */ /* | NODE 4 | 4 */ /* |_________________________________| */ /* */ /* Node 1 computes 4 rows of result matrix: */ /* __________________________________ */ /* | | */ /* | 4x16 CUDA block | */ /* |_________________________________| */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <mpi.h> #define N 1024 # It has to be 32 multiple. Min 32 * Number of nodes. #define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0) struct timeval start_time, end_time; inline void checkCuda(hipError_t e) { if (e != hipSuccess) { err("CUDA Error %d: %s\n", e, hipGetErrorString(e)); } } __global__ void matrixProduct(double *matrix_a, double *matrix_b, double *matrix_c, int width, int from, int my_rank) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; matrix_c[row * width + col] = 0; for (int k=0; k<width; k++) { matrix_c[row * width + col] += matrix_a[((row + from) * width) + k] * matrix_b[k * width + col]; } } void initializeMatrices(double matrix_a[N][N], double matrix_b[N][N]) { int i, j; srand(time(NULL)); for (i=0; i<N; i++) { for (j=0; j<N; j++) { matrix_a[i][j] = rand(); matrix_b[i][j] = rand(); } } } void showMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N]) { int i, j; srand(time(NULL)); printf("***** MATRIX A ***** \n"); for (i=0; i<N; i++) { for (j=0; j<N; j++) { (j % N == N-1) ? printf("%.1f \n", matrix_a[i][j]) : printf("%.1f,", matrix_a[i][j]); } } printf("***** MATRIX B ***** \n"); for (i=0; i<N; i++) { for (j=0; j<N; j++) { (j % N == N-1) ? printf("%.1f \n", matrix_b[i][j]) : printf("%.1f,", matrix_b[i][j]); } } printf("***** RESULT MATRIX ***** \n"); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { (j % N == N-1) ? 
printf("%f \n", matrix_c[i][j]) : printf("%f,", matrix_c[i][j]); } } } void checkMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N], double matrix_testc[N][N]) { int i, j, k; for(i = 0; i < N; i++) for(j = 0; j < N; j++) for(k = 0; k < N; k++) { matrix_testc[i][j] += matrix_a[i][k] * matrix_b[k][j]; } for(i = 0; i < 32 == 1; i++) { for(j = 0; j < 32; j++){ printf("%.1f ", (matrix_c[i][j])); } printf("\n"); } printf("\n\n\n"); for(i = 0; i < 32 == 1; i++) { for(j = 0; j < 32; j++){ printf("%.1f ", (matrix_testc[i][j])); } printf("\n"); } } int main(int argc, char *argv[]) { double A[N][N], B[N][N], C[N][N], C_TEST[N][N]; double *d_a, *d_b, *d_c; int my_rank, comm_sz, from, to, nrows; // MPI initialization MPI_Init (&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Process id MPI_Comm_size(MPI_COMM_WORLD, &comm_sz); // Number of processors if (N % comm_sz != 0) { if (my_rank == 0) printf("Matrix size not divisible by number of processors \n"); MPI_Finalize(); exit(-1); } // Calculate interval lines to compute per node from = my_rank * N / comm_sz; to = (my_rank + 1) * N / comm_sz; nrows = to - from; if (my_rank == 0) { initializeMatrices(A, B); } // Send A y B to every node MPI_Bcast(A, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Allocate memory in the device checkCuda(hipMalloc((void **) &d_a, N*N*sizeof(double))); checkCuda(hipMalloc((void **) &d_b, N*N*sizeof(double))); checkCuda(hipMalloc((void **) &d_c, (N*N/comm_sz)*sizeof(double))); // Copy the information in the device checkCuda(hipMemcpy(d_a, A, N*N*sizeof(double), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(d_b, B, N*N*sizeof(double), hipMemcpyHostToDevice)); // CUDA threads structure definition dim3 dimGrid(N/32, N/(32*comm_sz)); dim3 dimBlock(32, 32); // MAX BLOCK SIZE MPI_Barrier(MPI_COMM_WORLD); if (my_rank == 0) { gettimeofday(&start_time, NULL); } // Kernel launch hipLaunchKernelGGL(( matrixProduct), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, N, from, my_rank); checkCuda(hipDeviceSynchronize()); checkCuda(hipGetLastError()); // Calculate compute time MPI_Barrier(MPI_COMM_WORLD); if (my_rank == 0) { gettimeofday(&end_time, NULL); printf("Compute time: %.1f ms \n", (float) (end_time.tv_sec - start_time.tv_sec) * 1000 + (end_time.tv_usec - start_time.tv_usec) / 1000); } // Get results from device checkCuda(hipMemcpy(C[from], d_c, (nrows)*N*sizeof(double), hipMemcpyDeviceToHost)); // Unify results from nodes MPI_Gather(C[from], N*N/comm_sz, MPI_DOUBLE, C, N*N/comm_sz, MPI_DOUBLE, 0, MPI_COMM_WORLD); // if (my_rank == 0) { showMatrices(A, B, C); } checkCuda(hipFree(d_a)); checkCuda(hipFree(d_b)); checkCuda(hipFree(d_c)); MPI_Finalize(); return 0; }
7d78676512b3b49376431ea5008fabbda58f2b19.cu
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* MULTI-NODE AND PARALLEL MATRIX-MATRIX PRODUCT WITH MPI AND CUDA */ /* */ /* Description: This program performs a matrix product (A * B = C) */ /* distributing the computation between multiple nodes */ /* with MPI technology and parallelizing the computation in */ /* every node with Nvidia CUDA technology */ /* Compilation: nvcc -I/opt/mpi/bullxmpi/1.2.9.1/include */ /* -L/opt/mpi/bullxmpi/1.2.9.1/lib -lmpi -ldl -lm -lnuma */ /* -lrt -lnsl -lutil -lm -ldl mmpmpicuda.cu -o mmpmpicuda */ /* Strategy: */ /* Example 16x16 matrices with 4 nodes: */ /* _________________16________________ */ /* | | */ /* | NODE 1 | 4 */ /* |_________________________________| */ /* | | */ /* | NODE 2 | 4 */ /* C = |_________________________________| 16 */ /* | | */ /* | NODE 3 | 4 */ /* |_________________________________| */ /* | | */ /* | NODE 4 | 4 */ /* |_________________________________| */ /* */ /* Node 1 computes 4 rows of result matrix: */ /* __________________________________ */ /* | | */ /* | 4x16 CUDA block | */ /* |_________________________________| */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <mpi.h> #define N 1024 # It has to be 32 multiple. Min 32 * Number of nodes. #define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0) struct timeval start_time, end_time; inline void checkCuda(cudaError_t e) { if (e != cudaSuccess) { err("CUDA Error %d: %s\n", e, cudaGetErrorString(e)); } } __global__ void matrixProduct(double *matrix_a, double *matrix_b, double *matrix_c, int width, int from, int my_rank) { int row = threadIdx.y + blockDim.y * blockIdx.y; int col = threadIdx.x + blockDim.x * blockIdx.x; matrix_c[row * width + col] = 0; for (int k=0; k<width; k++) { matrix_c[row * width + col] += matrix_a[((row + from) * width) + k] * matrix_b[k * width + col]; } } void initializeMatrices(double matrix_a[N][N], double matrix_b[N][N]) { int i, j; srand(time(NULL)); for (i=0; i<N; i++) { for (j=0; j<N; j++) { matrix_a[i][j] = rand(); matrix_b[i][j] = rand(); } } } void showMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N]) { int i, j; srand(time(NULL)); printf("***** MATRIX A ***** \n"); for (i=0; i<N; i++) { for (j=0; j<N; j++) { (j % N == N-1) ? printf("%.1f \n", matrix_a[i][j]) : printf("%.1f,", matrix_a[i][j]); } } printf("***** MATRIX B ***** \n"); for (i=0; i<N; i++) { for (j=0; j<N; j++) { (j % N == N-1) ? printf("%.1f \n", matrix_b[i][j]) : printf("%.1f,", matrix_b[i][j]); } } printf("***** RESULT MATRIX ***** \n"); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { (j % N == N-1) ? 
printf("%f \n", matrix_c[i][j]) : printf("%f,", matrix_c[i][j]); } } } void checkMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N], double matrix_testc[N][N]) { int i, j, k; for(i = 0; i < N; i++) for(j = 0; j < N; j++) for(k = 0; k < N; k++) { matrix_testc[i][j] += matrix_a[i][k] * matrix_b[k][j]; } for(i = 0; i < 32 == 1; i++) { for(j = 0; j < 32; j++){ printf("%.1f ", (matrix_c[i][j])); } printf("\n"); } printf("\n\n\n"); for(i = 0; i < 32 == 1; i++) { for(j = 0; j < 32; j++){ printf("%.1f ", (matrix_testc[i][j])); } printf("\n"); } } int main(int argc, char *argv[]) { double A[N][N], B[N][N], C[N][N], C_TEST[N][N]; double *d_a, *d_b, *d_c; int my_rank, comm_sz, from, to, nrows; // MPI initialization MPI_Init (&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Process id MPI_Comm_size(MPI_COMM_WORLD, &comm_sz); // Number of processors if (N % comm_sz != 0) { if (my_rank == 0) printf("Matrix size not divisible by number of processors \n"); MPI_Finalize(); exit(-1); } // Calculate interval lines to compute per node from = my_rank * N / comm_sz; to = (my_rank + 1) * N / comm_sz; nrows = to - from; if (my_rank == 0) { initializeMatrices(A, B); } // Send A y B to every node MPI_Bcast(A, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD); MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD); // Allocate memory in the device checkCuda(cudaMalloc((void **) &d_a, N*N*sizeof(double))); checkCuda(cudaMalloc((void **) &d_b, N*N*sizeof(double))); checkCuda(cudaMalloc((void **) &d_c, (N*N/comm_sz)*sizeof(double))); // Copy the information in the device checkCuda(cudaMemcpy(d_a, A, N*N*sizeof(double), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(d_b, B, N*N*sizeof(double), cudaMemcpyHostToDevice)); // CUDA threads structure definition dim3 dimGrid(N/32, N/(32*comm_sz)); dim3 dimBlock(32, 32); // MAX BLOCK SIZE MPI_Barrier(MPI_COMM_WORLD); if (my_rank == 0) { gettimeofday(&start_time, NULL); } // Kernel launch matrixProduct<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N, from, my_rank); checkCuda(cudaDeviceSynchronize()); checkCuda(cudaGetLastError()); // Calculate compute time MPI_Barrier(MPI_COMM_WORLD); if (my_rank == 0) { gettimeofday(&end_time, NULL); printf("Compute time: %.1f ms \n", (float) (end_time.tv_sec - start_time.tv_sec) * 1000 + (end_time.tv_usec - start_time.tv_usec) / 1000); } // Get results from device checkCuda(cudaMemcpy(C[from], d_c, (nrows)*N*sizeof(double), cudaMemcpyDeviceToHost)); // Unify results from nodes MPI_Gather(C[from], N*N/comm_sz, MPI_DOUBLE, C, N*N/comm_sz, MPI_DOUBLE, 0, MPI_COMM_WORLD); // if (my_rank == 0) { showMatrices(A, B, C); } checkCuda(cudaFree(d_a)); checkCuda(cudaFree(d_b)); checkCuda(cudaFree(d_c)); MPI_Finalize(); return 0; }
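Each rank computes N/comm_sz consecutive rows, and the launch geometry mirrors that slice: dimGrid is (N/32) blocks across by (N/(32*comm_sz)) blocks down, with 32x32 threads per block, so the total thread count equals nrows*N. The standalone check below just reproduces that arithmetic with illustrative values; it is not part of the MPI program.

#include <stdio.h>

int main(void) {
    const int N = 1024, comm_sz = 4;          // matrix size and rank count (illustrative)
    for (int rank = 0; rank < comm_sz; ++rank) {
        int from  = rank * N / comm_sz;       // first row this rank computes
        int to    = (rank + 1) * N / comm_sz; // one past the last row
        int nrows = to - from;
        int gridX = N / 32;                   // blocks across the full row width
        int gridY = N / (32 * comm_sz);       // blocks covering this rank's rows
        printf("rank %d: rows [%d,%d), %d rows, grid %dx%d of 32x32 threads = %d threads\n",
               rank, from, to, nrows, gridX, gridY, gridX * gridY * 32 * 32);
    }
    return 0;
}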
835ee1f5c1cfe47cf3a20bea5db220b64fd6bd27.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************||******************************************** Genetic algorithm optimizer genA.cu Runs iterations of a genetic algoirthm to optimize molecular mechanics dihedral parameters @author James Maier and edits Kellon Belfon @lab Carlos Simmerling lab, Stony Brook University @version 2.0 2016 Aug 1 **********************************************||********************************************/ /******************************************************************************************* ---------------LOAD LIBRARIES------------- *******************************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <math.h> #include <iostream> #include <fstream> #include <string> #include <sstream> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/generate.h> #include <thrust/device_ptr.h> /*#undef __GLIBCXX_ATOMIC_BUILTINS #undef __GLIBCXX_USE_INT128 #define _GLIBCXX_GTHREAD_USE_WEAK 0 */ #include <list> #include <map> #include "load.cpp" #include "parse.hpp" using namespace std; /* specifying # of threads for a given block, 256 block threads (index 0 to 255) */ const int BLOCK_SIZE=256; //#define HANDLE_ERROR(x) x;error=hipGetLastError();if(error!=hipSuccess){printf("CUDA error: %s\n", hipGetErrorString(error));exit(-1);} #define HANDLE_ERROR(x) x; /************************************************************************************************* Defining the six pivotal functions for the genetic algorithm (1) mateIt, (2) mutateIt, (3) scoreIt, (4) calcAreas, (5) moveEm, (6) getSumAreas getSumAreas uses two other functions sumEm and sumEmIndex *************************************************************************************************/ /************************************************************************************************ | function1: mateIt | * creates offspring from a population, generating crossovers according to pCross * * @param Vs a global array of all the parent and child genomes * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual * @param areas the probabilities for choosing each individual for mating * @param sumArea pointer to the sum of all the individual areas * @param rands array of random numbers * @param pCross probability that crossover occurs * @param pSize number of individuals in the population * @param genomeSize number of genes in a genome ************************************************************************************************/ __global__ void mateIt(float *Vs, int *ptrs, const float *areas, const float *sumArea, const float *rands, const float pCross, const int pSize, const int genomeSize) { /* figure out index blockId.x is the index by blocks, blockDIM.x is the elements per blocks (# of threads ina block) threadIdx is the index for threads */ int i=blockIdx.x * blockDim.x + threadIdx.x; /* first parent, second parent, crossover random numbers randi is three arrays with block and thread index of randoms numbers from 0 to 255; The cross over random numbers */ int randi=i*3; //multiply i by 2, as we will have 2 parents and 2 offspring multiplication is done using a left bitwise (<<) by 1 i<<=1; /* if we're in the population (sometimes warps may go past) The statement if (i<psize) is common in cuda: Before i index is used to access array elements, its value is checked against the 
number of elements, n, to ensure there are no out-of-bounds memory accesses. This check is required for cases where the number of elements in an array is not evenly divisible by the thread block size, and as a result the number of threads launched by the kernel is larger than the array size. */ if(i<pSize){ int parent[2]; int j; /* figure out parents */ parent[0]=parent[1]=-1; /* find parent where cumulative (cum) area (A) is less than random target (tgt) area */ float cumA=0.0f, tgtA=rands[randi++]* *sumArea; while(cumA<=tgtA){ ++parent[0]; cumA+=areas[ptrs[parent[0]]/genomeSize]; /* rands[randi-1] is the index back to zero since it is the first set of parents */ } #if DEBUG>2 printf("rands[%d] ; %f ; %f=%f * %f\n",randi, cumA, tgtA, rands[randi-1], *sumArea); printf("first parent\n"); #endif cumA=0.0f; tgtA=rands[randi++]* (*sumArea-areas[ptrs[parent[0]]/genomeSize]); while(cumA<=tgtA){ ++parent[1]; if(parent[1]==parent[0]) ++parent[1]; cumA+=areas[ptrs[parent[1]]/genomeSize]; } #if DEBUG>2 printf("Make offspring %d from %d and %d (%f=%f*(%f-%f)) %d\n", i, parent[0], parent[1], tgtA, rands[randi-1], *sumArea, areas[ptrs[parent[0]]/genomeSize], randi); #endif /* add offset of pSize to i because it is a child (next population) */ i+=pSize; /* use ptrs to get indices into Vs */ int i0=ptrs[i], i1=ptrs[i+1]; parent[0]=ptrs[parent[0]]; parent[1]=ptrs[parent[1]]; /* set j to index for the next set of Vs */ j=i0+genomeSize; /* put parent[0], parent[1], and i1 relative to i0, so we can just add i0 for index */ parent[0]-=i0; parent[1]-=i0; i1-=i0; /* start with crossover pt at the end (no crossover) */ int crossPt=j; /* check for crossover */ if(rands[randi]<pCross){ crossPt=i0+1+(int)(rands[randi]/pCross*(float)(genomeSize-1)); } //int halfcpt=genomeSize/2; //if (crossPt < halfcpt){ //while(i0<crossPt){ //Vs[i0]=Vs[parent[1]+i0]; //Vs[i1+i0]=Vs[parent[0]+i0]; //} //} //else { //while(i0>=crossPt){ //Vs[i0]=Vs[parent[1]+i0]; //Vs[i1+i0]=Vs[parent[0]+i0]; //} //} //while(i0<crossPt){ /* load next bit from parent and increment i */ //Vs[i0]=Vs[parent[0]+i0]; //Vs[i1+i0]=Vs[parent[1]+i0]; //++i0; //} //while(i0>=crossPt && i0<j){ i0=crossPt; for(i0;i0<j;i0++){ Vs[i0]=Vs[parent[1]+i0]; Vs[i1+i0]=Vs[parent[0]+i0]; ++i0; } } } /************************************************************************************************ | function 2: mutateIt | * @brief introduces mutations to the genomes in Vs, according to probability pMut, with a max perturbation of max * * @param Vs a global array of all the parent and child genomes * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual @param rands array of random numbers * @param pSize number of individuals in the population * @param pMut probability that a mutation occurs, evaluated for each gene * @param max maximum perturbation to an allele * @param genomeSize number of genes in a genome *************************************************************************************************/ __global__ void mutateIt(float *Vs, int *ptrs, const float *rands, const int pSize, const float pMut, const float max, const int genomeSize) { /* figure out index */ int i=blockIdx.x * blockDim.x + threadIdx.x; if(i<pSize){ // get index into random number array int r=i*genomeSize; i=ptrs[i]; int j=i+genomeSize; // want random numbers from [-max, max). 
will subtract max later float scale=2.0f*max/pMut; // iterate through genome while(i<j){ if(rands[r]<pMut){ // mutate the amplitude by adding perturbation Vs[i]+=rands[r]*scale-max; } ++i; ++r; } } } /************************************************************************************************ | function 3: scoreIt | * @brief calculates a score indicating the closeness of fit for each individual/chromosome (set of parameters) against the training set * @param scores score for each conformation, calculated here * @param areas weighting for each conformation, was formerly calculated here * @param Vs a global array of all the parent and child genomes * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual * @param tset training set * @param tgts targets for training * @param wts weights of each point in the training set * @param breaks breaks in training set, where different data should not be compared across breaks * @param nConf number of conformations in training set * @param pSize number of individuals in the population * @param genomeSize number of genes in a genome * @param xx space to store energy differences for each conformation with test parameters ************************************************************************************************/ __global__ void scoreIt(float *scores, float *areas, const float *Vs, const int *ptrs, const float *tset, const float *tgts, const float *wts, const int *breaks, const int nConf, const int pSize, const int genomeSize, float *xx) { int i=blockIdx.x * blockDim.x + threadIdx.x; //if((i<<1)<(pSize-1)*pSize){ if(i<pSize){ float *x=xx+i*nConf; // for the error of each conformation // get reference to score float *S=scores+i; // set score to 0 *S=0.0f; // accumulate little s for each set float s; // get first index in genome int i0=ptrs[i]; // get index of next genome space for looping bounds int j=i0+genomeSize; // start with the first element in the training set int t=0; /* start at break 0 */ int b=0; /* loop over conformations c */ int c=0; while(c<nConf){ //int nP=0; s=0.0f; /* loop only in units without break points */ while(c<breaks[b+1]){ /* start with delta E (tgts) for a given conformation (c) within a break; see load.cpp conf (c) goes through until it reach a break. 
the loop will set delta E */ x[c]=tgts[c]; /* subtract contributions from each parameter for conformation c for each conformation e.g deltaE - cos (dihedral * periodicity) * parameter generated from chromosomes */ for(i=i0;i<j;i++,t++){ x[c]-=tset[t]*Vs[i]; } /* add differences in this error from all other errors */ for(int c2=breaks[b];c2<c;c2++){ float err=x[c]-x[c2]; s+=(err<0.0f?-err:err); } /* next conformation */ ++c; } /* add little error to big error S, weighted by number of pairs */ *S+=s*wts[b]; /* go to next breakpoint */ ++b; } #if DEBUG>1 printf("areas[%d]=%f\n",i0/genomeSize,areas[i0/genomeSize]); #endif } } /************************************************************************************************** * | function 4: calcAreas | * * * * calculates the areas (the probability) each individual has of mating * *___________________________________Parameters____________________________________________________* * @param scores scores for each individual (set of parameters) * * @param areas fitness for each individual, in terms of probability of mating * * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual* * @param pSize number of individuals in the population * * @param genomeSize number of genes in a genome * **************************************************************************************************/ __global__ void calcAreas(float *scores, float *areas, const int *ptrs, const int pSize, const int genomeSize) { int i=blockIdx.x * blockDim.x + threadIdx.x; if(i<pSize){ areas[ptrs[i]/genomeSize]=__expf(-scores[i]/scores[0]); } } /************************************************************************************************ * | function 5: moveEm | * * @brief simple helper function for copying data from oldF, oldI to neWF, newI * * @param newF pointer to new float array * @param newI pointer to new int array * @param oldF pointer to old float array * @param oldI pointer to old int array * @param N number of floats/ints to copy *************************************************************************************************/ __global__ void moveEm(float * newF, int *newI, float *oldF, int *oldI, int N) { int i=blockIdx.x * blockDim.x + threadIdx.x; if(i<N){ newF[i]=oldF[i]; newI[i]=oldI[i]; } } /******************************| function 5 ends |***********************************************/ /************************************************************************************************ | sumEm and sumEmIndex : helper function for getSumAreas | * @brief performs a sum of each successive pair of N numbers in source and stores the sums in sums. intended to be run multiple times to sum over a whole array. if N is odd, the last sum index will be N/2-1 and contain the sum of the last 3 numbers * * @param sums where to store the sums * @param source where to get the numbers to sum together * @param N the dimension of source * * @return */ __global__ void sumEm(float *sums, float *source, int N){ int i=blockIdx.x*blockDim.x+threadIdx.x; int j=(i<<1); if(j+3<N)sums[i]=source[j]+source[j+1]; else if(j+3==N) sums[i]=source[j]+source[j+1]+source[j+2]; else if(j+2==N) sums[i]=source[j]+source[j+1]; } /* * @brief performs a sum of pairs of N numbers in source, using locations indicated by pointers. pointers has indices multiplied by genomeSize. intended to be run multiple times to sum over a whole array. 
if N is odd, the last sum index will be N/2-1 and contain the sum of the last 3 numbers * * @param sums where to store the sums * @param source where to get the numbers to sum together * @param N the dimension of source * @param ptrs the indices to use when gathering pairs for summation * @param genomeSize the number by which the indices in ptrs are scaled * * @return */ __global__ void sumEmIndex(float *sums, float *source, int N, const int *ptrs, const int genomeSize){ int i=blockIdx.x*blockDim.x+threadIdx.x; int j=(i<<1); if(j+3<N)sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]; else if(j+3==N) sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]+source[ptrs[j+2]/genomeSize]; else if(j+2==N) sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]; #if DEBUG>1 if(j+2<=N)printf(" %d:%f",i,sums[i]); #endif } /*******************************| end of helper function |***************************************/ /************************************************************************************************* * | function 6: getSumAreas | * * ---------uses sumEmIndex and sumEM-------- * * * * @brief get sum of all areas * * * @param areas_d pointer to areas on device * * @param ptrs_d pointer to indices for each individual in population * @param pSize population size * @param temp_d pointer to temporary array on device * @param genomeSize number of alleles in genome ************************************************************************************************/ float *getSumAreas(float *areas_d, int *ptrs_d, int pSize, float *temp_d, const int & genomeSize){ int dim=pSize; int offset=0; /* the triple chevron below describes an execution configuration the first argument(((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE) in the execution configuration specifies the number of thread blocks in the grid, and the second specifies (BLOCK_SIZE) the number of threads in a thread block */ hipLaunchKernelGGL(( sumEmIndex) , dim3(((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, temp_d, areas_d, dim, ptrs_d, genomeSize); #if DEBUG>1 std::cout << std::endl; #endif pSize >>= 1; while((dim>>=1)>1){ offset^=pSize; hipLaunchKernelGGL(( sumEm) , dim3(((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, temp_d+offset, temp_d+(offset^pSize), dim); #if DEBUG>1 std::cout << std::endl; #endif } return temp_d+offset; } /* /////////////////////////////////////////////////////// ` ////////////////////////////////// ` ///////////////////// | | ///////////// ~ ~ ~ ~ ~ ~ ~ //////// | | ///// ____| |____ /// | | // ___| J.M |___ / | K.B | / PROGRAM BEGINS HERE | | **************************************************************************************************/ /************************************************************************************************* argc is a vairable with the number of arguments passed to GenA argv is a vector of strings representing the the arguments the GenA takes To run genA: ./genA -p parmfile < inputfile > outputfile parameters in the parmfile psize: population size, 1000-2000 nGen: number of generations, > 100000 pMut: probability of mutation, 0.01 - 0.001 max: maximal permissible mutation, 0.5 - 0.001 pCross: probability of crossover 0.8-0.9 randomseed: sixdigit random number, upon mutation, the lowest bit of the random number is used to determine whether the amplitude or the phase shift will change. 
input file: parametersfitting data using the following format: _____________________________________________________________________ -<dihedral> <AMBER atom type for dihedral 1> | -<dihedral> <AMBER atom type for dihedral 2> | <name of data set> <dihedral 1> <dihedral 2> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | ... | / | <name of data set> <dihedral 1> <dihedral 2> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | ... | / | _____________________________________________________________________| <dihedral> is the name of dihedral e.g phi, psi, chi1, chi2, chi3, etc <AMBER atom type for dihedral 1> e.g chi1 is N -CX-2C-2C for Met, get from frcmod file <name of data set> is any name, e.g Metalpha, Metbeta, Metcharge <dihedral 1 value> this is the dihedral value (deg) of the optimized QM structures e.g 105.62 <E_QM> the QM energy of conformation i with restraint dihedral <E_MM> the MM energy of conformation i with restraint dihedral with zeroed dihedral parameters in the frcmod ... repeat for all conformations within a break / (refer to as break (brk)) a break seperate conformations that are different e.g alpha backbone, beta backbone, charge amino acids GOODLUCK!!! [ O O ] [ b ' ] [ ----- ] contact: [email protected] with genA title for help ********************************************************************************************************/ int main(int argc, char *argv[]){ /* load genA parameters, see above */ hipError_t error; if (!(argv[1]=="-p")) std::cout << "please use -p for param file"; ConfigFile cfg(argv[2]); // check if keys exixt if (!(cfg.keyExists("pSize"))) std::cout << "oops you forgot pSize"; //make it for others //add the rest of parameters if exist as line above // Retreive the value of keys pSize if key dont exist return value will be 1 //add new parameters here int pSize = cfg.getValueOfKey<int>("pSize", 1); std::cout << "Population Size (pSize): " << pSize << "\n\n"; int nGen = cfg.getValueOfKey<int>("nGen", 1); std::cout << "Number of Generations (nGen): " << nGen << "\n\n"; float pMut = cfg.getValueOfKey<float>("pMut", 1); std::cout << "Probability of Mutations (pMut): " << pMut << "\n\n"; float max = cfg.getValueOfKey<float>("max", 1); std::cout << "Maximal permissible mutation (max): " << max << "\n\n"; float pCross = cfg.getValueOfKey<float>("pCross", 1); std::cout << "Probability of crossover (pCross): " << pCross << "\n\n"; int rseed = cfg.getValueOfKey<int>("rseed", 1); std::cout << "Random seed (rseed): " << rseed << "\n\n"; int peng = cfg.getValueOfKey<int>("peng", 1); std::cout << "Print scores every " << peng << "generations (peng)\n\n"; int ncp = cfg.getValueOfKey<int>("ncp", 1); std::cout << "Print scores of only " << ncp << " chromosomes every peng \n\n"; /* initializing CPU variables and arrays */ int genomeSize, g, N, nConf=0, save=pSize/10; float *rands, *Vs, *tset, *tgts, *wts, *scores; int *ptrs, *breaks, nBreaks; /* initializing GPU variables and arrays */ size_t nRands; hiprandGenerator_t gen; int *ptrs_d, *breaks_d; float *rands_d, *Vs_d, *tset_d, *tgts_d, *wts_d, *xx_d, *scores_d, *areas_d; /*specify the string of the savefile, scorefile, loadfile name */ std::string saveFile,loadFile,scoreFile,scoreTest; /* dealing with loading the input file and save file string name */ for (int i=2;i<argc;i++){ if(i+1<argc){ if(argv[i][0]=='-'&&argv[i][1]=='r')saveFile=argv[++i]; else 
if(argv[i][0]=='-'&&argv[i][1]=='c')loadFile=argv[++i]; else if(argv[i][0]=='-'&&argv[i][1]=='s')scoreFile=argv[++i]; else if(argv[i][0]=='-'&&argv[i][1]=='f')scoreTest=argv[++i]; } } /***************************| load data from load.cpp |****************************************** * Initializing host data(Normally the 2nd step) * * check load.cpp for this section * * map is a way to create a dictionary, correction map is an array with key * ************************************************************************************************/ /* initiating container with key and values name correctionMap */ std::map<std::string,DihCorrection> correctionMap; std::cout << "Input file loaded ('_')" << std::endl; /* load in arrays generated from load.cpp, check it out for further comments & specifies the addrress, loading the variables that contain address of another variable correctionMap is ..... */ load(std::cin, &tset, &tgts, &wts, &nConf, &breaks, &nBreaks, &genomeSize, correctionMap); /*******************************| memory allocation |******************************************** *************************************************************************************************/ /* first hipMalloc to initialize the CUDA subsystem hipMalloc allocates size bytes of linear memory on the device and returns in *devPtr a pointer to the allocated memory. It takes two parameters: (1) devPtr - Pointer to allocated device memory e.g variable &breaks_d that have the address of the the variable breaks_d (stored in memory) (2) size - Requested allocation size in bytes, which is nBreaks */ #if DEBUG && 0 for(int i=0;i<nConf;i++){ for(int j=0;j<genomeSize;j++) std::cerr << ' ' << tset[i*genomeSize+j]; std::cerr << std::endl; } std::cerr << tgts[0] << ' ' << tgts[1] << ' ' << tgts[2] << ' ' << tgts[3] << std::endl; std::cerr << "first hipMalloc, " << nBreaks << " breaks" << std::endl; #endif /* we are allocating space on the GPU to store four arrays (breaks_d, tgts_d, wts_d, tset_d) with size specified below. The size (# of elements the array can hold, which is directly related to memory to store each element in the array) of the array on the GPU is a lot larger than the host array at this point in the algorithm. Later we will add results to these arrays. */ hipMalloc((void **)&breaks_d, nBreaks*sizeof(int)); hipMalloc((void **)&tgts_d, (nBreaks-1+nConf*(1+genomeSize))*sizeof(float)); wts_d=tgts_d+nConf; tset_d=wts_d+nBreaks-1; /********************************| transfer data from host to device |**************************** * copy data arrays from CPU host (breaks,tset,tgts,wts) to device GPU (breaks_d, etc) * *************************************************************************************************/ #if DEBUG std::cerr << "COPY" << std::endl; #endif /* copying over the arrays from the CPU to GPU nbreaks is the # of dataset + 1. 
e.g if you are doing alpha and beta backbone set then nbreaks=3 genomesize is the # of fitting dihedral * periodicity, e.g 3 set of dihedral * 4 periodicity = 12 nconf is the # of conformations you are fitting tset is (E_QMi-E_MMi) + (E_MMref-E_QMref) for each conformation, which = nconf, see load.cpp tgts is the cos(dih*periodicity) for 4 periodicity for a dihedral for each conformation so 20 conf will give tgts of 20 (nconf) * 12 (# of dih * periodicity) = 120 */ hipMemcpy(breaks_d, breaks, nBreaks*sizeof(breaks[0]), hipMemcpyHostToDevice); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} hipMemcpy(tset_d, tset, nConf*genomeSize*sizeof(float), hipMemcpyHostToDevice); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} hipMemcpy(tgts_d, tgts, nConf*sizeof(float), hipMemcpyHostToDevice); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} hipMemcpy(wts_d, wts, (nBreaks-1)*sizeof(*wts), hipMemcpyHostToDevice); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} /**********************| initiate GPU blocks and # of random variable |*************************** * we need randoms, new pop 3xcrossover, genomeSizexmut * * genome size is the number of genes which is all the parameters, * * e.g for 4 periodicity and three dihedral fitting, then genomesize will be 4 * 3 = 12 * * nRands is number of randoms we need for each set of parameters * * e.g if psize (population size) is 10, then number of random number we will need is * * (3+(# of periodicity x # of dihedral)) * psize * * so for 4 periodicity and 3 dihedral fitting (chi1 chi2 chi3), then nRands = 3+12 * 10 = 150 * *________________________________________________________________________________________________* * nBlocks is dependent on the population size, it is use to figure out how many GPU blocks * * we need to initialize the arrays for calculations. Each block has 256 threads. 
* * one thread represent one individual (chromosome with soln parameters) from the population * * e.g population size of 2000 will require (2000+256-1)/256 = 8.81 => 8 blocks * * * *************************************************************************************************/ nRands=(3+genomeSize)*pSize; int nBlocks=(pSize+BLOCK_SIZE-1)/BLOCK_SIZE; #ifdef DEBUG std::cerr << nRands << "nRands\n"; std::cerr << nBlocks << " blocks\n"; #endif /*******************************| initializing more host and device variables|************************ * N (bitwise operation below) is the pSize (1st input) multiply by 2; * * initiating the chromosomes which have the solns * ************************************************************************************************/ #if DEBUG printf("Allocate memory\n"); #endif rands=(float *)malloc(nRands*sizeof(float)); //hipMalloc((void **)&rands_d, nRands*sizeof(float)); N=(pSize<<1); HANDLE_ERROR(hipMalloc((void **)&Vs_d, (N*(genomeSize+4)+pSize*nConf+nRands)*sizeof(float))); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} rands_d=Vs_d+N*genomeSize; scores_d=rands_d+nRands; areas_d=scores_d+(N<<1); xx_d=areas_d+(N<<1); scores=(float *)malloc(sizeof(*scores)*N); float *scores_ds[2]; scores_ds[0]=scores_d; scores_ds[1]=scores_d+N; Vs=(float *)malloc(N*genomeSize*sizeof(float)); /*allocate the memory space to hold array of pointers (prts) of size N (2*pSize) these pointers point to the individuals (chromosome) in the population */ ptrs=(int *)malloc(sizeof(int)*N); ptrs[0]=0; for(g=1;g<N;g++)ptrs[g]=ptrs[g-1]+genomeSize; HANDLE_ERROR(hipMalloc((void **)&ptrs_d, N*2*sizeof(int))); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} int *ptrs_ds[2]; ptrs_ds[0]=ptrs_d; ptrs_ds[1]=ptrs_d+N; hipMemcpy(ptrs_d, ptrs, sizeof(int)*N, hipMemcpyHostToDevice); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} int curList=0; #if 0 HANDLE_ERROR(hipMalloc((void **)&scores_d, N*sizeof(float))); HANDLE_ERROR(hipMalloc((void **)&xx_d, nOffspring*nConf*sizeof(float))); #endif /* for CUDA beginners on thrust thrust is a c++ template library for CUDA similar to STL it have two containers: thrust::host_vector<type> and thrust::device_vector<type> the containers make common operations such as hipMalloc, hipFree, hipMemcpy, more concise e.g thrust::host_vector<int> vec_h(2) will allocate host vector with 2 elements thrust::device_vectore<int> vec_d = vec_h will copy host vector to device this will allow you to directly manipulate device values from the host so vec_d[0] = 5; can be done from host and once you output vector memory is automatically released std::cout << "my vector" << vec_d[0] << std::endl; it have a few algorithms, we use thrust::sort(), */ thrust::device_ptr<int> dPtrs(ptrs_d), dPtrs_save(ptrs_d+save); thrust::device_ptr<float> dScores(scores_d), dVs(Vs_d); thrust::device_ptr<float> dScores_save(scores_d+save), dScores_pSize(scores_d+pSize), dScores_N(scores_d+N); /**************************| Create a random generator |******************************************** * hiprandCreateGenerator takes two parameters: pointer to generator (*gen), type of generator * Once created,random number generators can be defined using the general options seed, offset,& order* When rng_type is HIPRAND_RNG_PSEUDO_DEFAULT, the type chosen is HIPRAND_RNG_PSEUDO_XORWOW * 
*__________________________________________________________________________________________________* *hiprandSetPseudoRandomGeneratorSeed takes two parameters (1) the generator (gen) & (2) seed value * * seed value # is used to initialize the generator and control the set of random numbers; * * same seed will the give same set of random numbers of the psuedorandom generator * * rseed is the random number specified from the 6th input) * *__________________________________________________________________________________________________* * hiprandGenerateNormal take 5 parameters: * * (1) generator - Generator to use * * (2) outputPtr - Pointer to device memory to store CUDA-generated results, * or Pointer to host memory to store CPU-generated resluts * * (3) num - Number of floats to generate * * (4) mean - Mean of normal distribution * * (5) stddev - Standard deviation of normal distribution * * Results are 32-bit floating point values with mean and standard deviation. * ***************************************************************************************************/ #if DEBUG printf("Create random generator\n"); #endif // create the generator name gen hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); #if DEBUG printf("Seed random generator\n"); #endif // initiate the generator with the random seed (rseed) hiprandSetPseudoRandomGeneratorSeed(gen, rseed); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (seed)\n", hipGetErrorString(error));} #if DEBUG std::cerr << "GenerateNormal" << std::endl; #endif hiprandGenerateNormal(gen, Vs_d, N*genomeSize, 0, 1); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (normal)\n", hipGetErrorString(error));} /***************************| END of random generator part |***************************************/ // if we have a load file copy Vs (amplitude parameters) from the loaded file and populate Vs if(!loadFile.empty()) { std::ifstream loadS(loadFile.c_str(), std::ios::in | std::ios::binary); loadS.read((char*)Vs,pSize*genomeSize*sizeof(*Vs)); hipMemcpy(Vs_d, Vs, pSize*genomeSize*sizeof(*Vs), hipMemcpyHostToDevice); } /* timing event */ hipEvent_t events[3]; int nevents = (sizeof events) / (sizeof events[0]); for (int i = 0; i < nevents ; ++i) hipEventCreate(events+i, 0); hipEventRecord(events[0], 0); /***************************| score of the first set of chromosomes |******************************* * Here we score initial chromsomes * ***************************************************************************************************/ #if DEBUG std::cerr << "1stscore" << std::endl; #endif /* lauch first kernel to score the initial set of chromsomes (Vs_d) and output scores in scores_ds betweem the triple chervon is called the execution configuration that takes two parts 1st part takes the number of thread blocks and the second part take the number of threads in a block */ hipLaunchKernelGGL(( scoreIt) , dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList], areas_d, Vs_d, ptrs_ds[curList], tset_d, tgts_d, wts_d, breaks_d, nConf, pSize, genomeSize, xx_d); /* score of chromosomes out of psize since we initiated 2 times psize */ hipLaunchKernelGGL(( scoreIt) , dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize, tset_d, tgts_d, wts_d, breaks_d, nConf, pSize, genomeSize, xx_d); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (1stscore)\n", hipGetErrorString(error));} #if DEBUG 
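/* Worked example of the execution configuration used for this first scoring pass
   (illustrative numbers only, not taken from any particular run): with pSize=2000 the
   buffer holds N=2*pSize=4000 individuals, so each launch above uses
   (N+BLOCK_SIZE-1)/BLOCK_SIZE = (4000+255)/256 = 16 blocks of 256 threads; inside the
   kernel only threads with i<pSize do work, one thread per chromosome, and the second
   launch scores the upper half of the population through the +pSize offsets. */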
std::cerr << "1stsort" << std::endl; #endif /* sort the scores from each chromosome of the initial population */ thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+N), thrust::device_pointer_cast(ptrs_ds[curList])); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (1stsort)\n", hipGetErrorString(error));} /* option to copy over scores from GPU device to CPU host */ #if DEBUG>2 hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, hipMemcpyDeviceToHost); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} hipMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, hipMemcpyDeviceToHost); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} hipMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, hipMemcpyDeviceToHost); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} /* i is each chromosome, scores[i] is scores, Vs[ptrs[i]] is the amplitude parameters; Vs[ptrs[i]]+n specifies the next n amplitude. e.g chromosome i have genomesize amplitude parms e.g Vs[ptrs[i]]+1 is the amplitude term when the periodicity is 3 for the 1st dihedral being fitted, and Vs[ptrs[i]]+4, the amplitude term when the periodicity is 4 for the 2nd dihedral */ for(int i=0;i<N;i++){ std::cerr << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n"; } #endif hipEventRecord(events[1], 0); /****************************| Let us begin the iterations through generations |******************** Genetic algorithm iterations through the number of generations or isolation time ****************************************************************************************************/ //std::cout << "There is " << nGen << " generations" << " and " << N << " number of chromosomes (2 x population size)" << std::endl; /* for loop for the generation */ for(g=0;g<nGen;g++){ /*************************| Step1: Generate random numbers |****************************************/ #if DEBUG>1 printf("Generate random numbers\n"); printf(" %d",g);fflush(stdout); #endif // create an array of random numbers (rands_d) used for mutations and crossover where the number of random #s is nRands hiprandGenerateUniform(gen, rands_d, nRands); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} /***| Step2: calculate the probabilities (areas) each individual (chromosome) has of mating |******/ #if DEBUG>2 std::cerr << "Mate" << std::endl; #endif hipLaunchKernelGGL(( calcAreas) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList], areas_d, ptrs_d, pSize, genomeSize); /***| Step3: mate the individuals (chromosomes,Parent[0],[1]) selected for the next generation |***/ hipLaunchKernelGGL(( mateIt) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, Vs_d, ptrs_ds[curList], areas_d, getSumAreas(areas_d, ptrs_ds[curList], pSize, areas_d+N, genomeSize), rands_d, pCross, pSize, genomeSize); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (mate)\n", hipGetErrorString(error));} /*****************| Step4: mutate individuals generated after mating |*****************************/ #if DEBUG>2 std::cerr << "Mutate" << std::endl; #endif hipLaunchKernelGGL(( mutateIt) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, Vs_d, ptrs_ds[curList]+pSize, rands_d+pSize*3, pSize, pMut, max, genomeSize); 
if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (mutate)\n", hipGetErrorString(error));} /**************| Step5: Score the individuals to select for the next generation |*******************/ #if DEBUG>2 std::cerr << "Score" << std::endl; #endif hipLaunchKernelGGL(( scoreIt) , dim3(nBlocks), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize, tset_d, tgts_d, wts_d, breaks_d, nConf, pSize, genomeSize, xx_d); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s (score)\n", hipGetErrorString(error));} #if DEBUG>2 //std::cerr << "Display em:\n\tCopy scores" << std::endl; hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, hipMemcpyDeviceToHost); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} //std::cerr << "\tCopy Vs" << std::endl; hipMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, hipMemcpyDeviceToHost); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} //std::cerr << "\tCopy ptrs" << std::endl; hipMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, hipMemcpyDeviceToHost); if((error=hipGetLastError())!=hipSuccess){fprintf(stderr, "Cuda error: %s\n", hipGetErrorString(error));} for(int i=0;i<N;i++){ /* below you can print the scores for a chromosomes every generation */ std::cout << "This is Generation: " << g << " and Chromosome (set of parameters): " << i << std::endl; std::cout << "Score: " << scores[i] << std::endl; /* below you can print out the scores and the first four barrier parameters,since we are using 4 periodicity, the first 4 barrier parameters are for the 1st dihedral in the input file */ //std::cout << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n"; std::cerr << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n"; } #endif /*****| Step6: Sort the scored chromosomes (individuals) & select for mating for next generation |**/ #if DEBUG>2 std::cerr << "Move 1" << std::endl; #endif hipLaunchKernelGGL(( moveEm) , dim3((save+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList^1], ptrs_ds[curList^1], scores_ds[curList], ptrs_ds[curList], save); #if DEBUG>2 std::cerr << "Move 2" << std::endl; #endif hipLaunchKernelGGL(( moveEm) , dim3((pSize+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList^1]+save, ptrs_ds[curList^1]+save, scores_ds[curList]+pSize, ptrs_ds[curList]+pSize, pSize);//nOffspring); #if DEBUG>2 std::cerr << "Move 3" << std::endl; #endif hipLaunchKernelGGL(( moveEm) , dim3((pSize-save+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, scores_ds[curList^1]+save+pSize, ptrs_ds[curList^1]+save+pSize, scores_ds[curList]+save, ptrs_ds[curList]+save, pSize-save); curList^=1; /* first sort only the ones that aren't going to be saved (elitist) */ #if DEBUG>1 std::cerr << "Selection sort (" << N << " items, less " << save << ")" << std::endl; #endif thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]+save), thrust::device_pointer_cast(scores_ds[curList]+pSize+save), thrust::device_pointer_cast(ptrs_ds[curList]+save)); /* then sort all those that fit within pSize */ #if DEBUG>1 std::cerr << "Rank sort" << std::endl; #endif thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+pSize), 
thrust::device_pointer_cast(ptrs_ds[curList])); /**************************************************************************************************** * Here you can print the score of chromosomes (total is 2 x population size) for each generation * * by uncommenting the if and end DEBUG statement, need to make this an input option * * such as -s which will mean print scores * ****************************************************************************************************/ //peng --> print every n generation, make a user option //ncp --> number of chromosomes to print, make a user option as well //if generation is divisable by peng if(g%peng==0) { std::ofstream scorefile; scorefile.open (scoreFile.c_str(), ios::out | ios::app); //it append to the writeout so make sure u delete scores file scorefile << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores\n"; hipMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, hipMemcpyDeviceToHost); //hipMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, hipMemcpyDeviceToHost); //hipMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, hipMemcpyDeviceToHost); if (ncp > pSize) { printf("Parmfile error: ncp should be smaller than psize! \n"); std::abort(); } for(int m=0;m<ncp;m++){ scorefile << std::setw(6) << g << std::setw(14) << m << std::setw(18) << scores[m] << "\n"; //scorefile << "Score: " << scores[m] << "\n"; //for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){ } scorefile.close(); } } // here the loop for generations ends hipEventRecord(events[2], 0); /**************************************************************************************************** * TERMINATION, LAST RESULTS< SCORES AND PARAMETERS FOR EACH INDIVIDUAL ****************************************************************************************************/ /* copy over the end result from GPU to the CPU to save the scores and parameters */ hipMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, hipMemcpyDeviceToHost); hipMemcpy(ptrs, ptrs_ds[curList], sizeof(int)*N, hipMemcpyDeviceToHost); hipMemcpy(scores, scores_ds[curList], sizeof(float)*N, hipMemcpyDeviceToHost); std::ofstream scoretest; scoretest.open (scoreTest.c_str(), ios::out | ios::trunc); //scoretest << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores\n"; for(int i=0;i<pSize;i++){ /* these are the final scores for each individual in the population, print in the output file */ //scoretest << std::setw(14) << i << std::setw(18) << scores[i] << "\n"; scoretest << std::setw(8) << scores[i] << "\n"; //std::cout << std::fixed << scores[i] << std::endl; for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){ /* second.setGenome(Vs+ptrs[i]) is the dihedral parameters for each individual in the population print in the output file */ //std::cout << it->second.setGenome(Vs+ptrs[i]); scoretest << std::setw(11) << it->second.setGenome(Vs+ptrs[i]); } } if(!saveFile.empty()){ std::ofstream saveS(saveFile.c_str(), std::ios::out | std::ios::binary); for(int i=0;i<pSize;i++) saveS.write((char *)(Vs+ptrs[i]),genomeSize*sizeof(*Vs)); } scoretest.close(); hipEventSynchronize(events[nevents-1]); float elapsedTimeInit, elapsedTimeCompute; hipEventElapsedTime(&elapsedTimeInit, events[0], events[1]); hipEventElapsedTime(&elapsedTimeCompute, events[1], events[2]); std::cout << "Initialization time: " << elapsedTimeInit * 1e-3 << std::endl; std::cout << "Computation time: " << elapsedTimeCompute * 1e-3 << 
std::endl;
#if 0
  std::cout << scores[pSize] << std::endl;
  for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){
    std::cout << it->second.setGenome(Vs+ptrs[pSize]);
    //std::cout << it->second;
  }
#endif
  free(ptrs);
#if 0
  /* rands holds floats, so copy it back as floats and print with %f */
  printf("Copy random numbers\n");
  hipMemcpy(rands, rands_d, nRands*sizeof(*rands), hipMemcpyDeviceToHost);
  printf("Print random numbers\n");
  printf("%f",rands[0]);
  for(size_t i=1;i<nRands;i++){
    printf(" %f",rands[i]);
  }
  putchar('\n');
#endif
/*****************| Free up GPU Memory |*******************************************************/
  hiprandDestroyGenerator(gen);
  hipFree(Vs_d);
  hipFree(ptrs_d);
  hipFree(breaks_d);
  hipFree(tgts_d);
  free(Vs);
  free(scores);
  free(rands);
  return 0;
}
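/* ------------------------------------------------------------------------------------------------
   Example usage (an illustrative sketch only). The exact key/value syntax of the parameter file
   is defined by ConfigFile in parse.hpp; the keys below are the ones read with getValueOfKey in
   main(), and the values are simply picks from the ranges suggested in the documentation above,
   not recommendations from any particular fit:

     pSize   2000     population size (1000-2000)
     nGen    100000   number of generations (> 100000)
     pMut    0.01     probability of mutation (0.01-0.001)
     max     0.5      maximal permissible mutation (0.5-0.001)
     pCross  0.9      probability of crossover (0.8-0.9)
     rseed   314159   six-digit random seed
     peng    100      print scores every peng generations
     ncp     10       number of chromosomes whose scores are printed (must be < pSize)

   A run might then look like
     ./genA -p parmfile -s scores.txt -f final_scores.txt < inputfile > outputfile
   where -r savefile additionally writes out the final population and -c loadfile restarts from a
   previously saved population. How the binary is built depends on the local toolchain; with the
   HIP toolchain something like `hipcc genA.cu -o genA`, linked against the hipRAND library, is
   the assumed starting point.
   ------------------------------------------------------------------------------------------------ */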
835ee1f5c1cfe47cf3a20bea5db220b64fd6bd27.cu
/*********************************************||******************************************** Genetic algorithm optimizer genA.cu Runs iterations of a genetic algoirthm to optimize molecular mechanics dihedral parameters @author James Maier and edits Kellon Belfon @lab Carlos Simmerling lab, Stony Brook University @version 2.0 2016 Aug 1 **********************************************||********************************************/ /******************************************************************************************* ---------------LOAD LIBRARIES------------- *******************************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand.h> #include <math.h> #include <iostream> #include <fstream> #include <string> #include <sstream> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/generate.h> #include <thrust/device_ptr.h> /*#undef __GLIBCXX_ATOMIC_BUILTINS #undef __GLIBCXX_USE_INT128 #define _GLIBCXX_GTHREAD_USE_WEAK 0 */ #include <list> #include <map> #include "load.cpp" #include "parse.hpp" using namespace std; /* specifying # of threads for a given block, 256 block threads (index 0 to 255) */ const int BLOCK_SIZE=256; //#define HANDLE_ERROR(x) x;error=cudaGetLastError();if(error!=cudaSuccess){printf("CUDA error: %s\n", cudaGetErrorString(error));exit(-1);} #define HANDLE_ERROR(x) x; /************************************************************************************************* Defining the six pivotal functions for the genetic algorithm (1) mateIt, (2) mutateIt, (3) scoreIt, (4) calcAreas, (5) moveEm, (6) getSumAreas getSumAreas uses two other functions sumEm and sumEmIndex *************************************************************************************************/ /************************************************************************************************ | function1: mateIt | * creates offspring from a population, generating crossovers according to pCross * * @param Vs a global array of all the parent and child genomes * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual * @param areas the probabilities for choosing each individual for mating * @param sumArea pointer to the sum of all the individual areas * @param rands array of random numbers * @param pCross probability that crossover occurs * @param pSize number of individuals in the population * @param genomeSize number of genes in a genome ************************************************************************************************/ __global__ void mateIt(float *Vs, int *ptrs, const float *areas, const float *sumArea, const float *rands, const float pCross, const int pSize, const int genomeSize) { /* figure out index blockId.x is the index by blocks, blockDIM.x is the elements per blocks (# of threads ina block) threadIdx is the index for threads */ int i=blockIdx.x * blockDim.x + threadIdx.x; /* first parent, second parent, crossover random numbers randi is three arrays with block and thread index of randoms numbers from 0 to 255; The cross over random numbers */ int randi=i*3; //multiply i by 2, as we will have 2 parents and 2 offspring multiplication is done using a left bitwise (<<) by 1 i<<=1; /* if we're in the population (sometimes warps may go past) The statement if (i<psize) is common in cuda: Before i index is used to access array elements, its value is checked against the number of elements, n, to ensure there are no out-of-bounds memory accesses. 
This check is required for cases where the number of elements in an array is not evenly divisible by the thread block size, and as a result the number of threads launched by the kernel is larger than the array size. */ if(i<pSize){ int parent[2]; int j; /* figure out parents */ parent[0]=parent[1]=-1; /* find parent where cumulative (cum) area (A) is less than random target (tgt) area */ float cumA=0.0f, tgtA=rands[randi++]* *sumArea; while(cumA<=tgtA){ ++parent[0]; cumA+=areas[ptrs[parent[0]]/genomeSize]; /* rands[randi-1] is the index back to zero since it is the first set of parents */ } #if DEBUG>2 printf("rands[%d] ; %f ; %f=%f * %f\n",randi, cumA, tgtA, rands[randi-1], *sumArea); printf("first parent\n"); #endif cumA=0.0f; tgtA=rands[randi++]* (*sumArea-areas[ptrs[parent[0]]/genomeSize]); while(cumA<=tgtA){ ++parent[1]; if(parent[1]==parent[0]) ++parent[1]; cumA+=areas[ptrs[parent[1]]/genomeSize]; } #if DEBUG>2 printf("Make offspring %d from %d and %d (%f=%f*(%f-%f)) %d\n", i, parent[0], parent[1], tgtA, rands[randi-1], *sumArea, areas[ptrs[parent[0]]/genomeSize], randi); #endif /* add offset of pSize to i because it is a child (next population) */ i+=pSize; /* use ptrs to get indices into Vs */ int i0=ptrs[i], i1=ptrs[i+1]; parent[0]=ptrs[parent[0]]; parent[1]=ptrs[parent[1]]; /* set j to index for the next set of Vs */ j=i0+genomeSize; /* put parent[0], parent[1], and i1 relative to i0, so we can just add i0 for index */ parent[0]-=i0; parent[1]-=i0; i1-=i0; /* start with crossover pt at the end (no crossover) */ int crossPt=j; /* check for crossover */ if(rands[randi]<pCross){ crossPt=i0+1+(int)(rands[randi]/pCross*(float)(genomeSize-1)); } //int halfcpt=genomeSize/2; //if (crossPt < halfcpt){ //while(i0<crossPt){ //Vs[i0]=Vs[parent[1]+i0]; //Vs[i1+i0]=Vs[parent[0]+i0]; //} //} //else { //while(i0>=crossPt){ //Vs[i0]=Vs[parent[1]+i0]; //Vs[i1+i0]=Vs[parent[0]+i0]; //} //} //while(i0<crossPt){ /* load next bit from parent and increment i */ //Vs[i0]=Vs[parent[0]+i0]; //Vs[i1+i0]=Vs[parent[1]+i0]; //++i0; //} //while(i0>=crossPt && i0<j){ i0=crossPt; for(i0;i0<j;i0++){ Vs[i0]=Vs[parent[1]+i0]; Vs[i1+i0]=Vs[parent[0]+i0]; ++i0; } } } /************************************************************************************************ | function 2: mutateIt | * @brief introduces mutations to the genomes in Vs, according to probability pMut, with a max perturbation of max * * @param Vs a global array of all the parent and child genomes * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual @param rands array of random numbers * @param pSize number of individuals in the population * @param pMut probability that a mutation occurs, evaluated for each gene * @param max maximum perturbation to an allele * @param genomeSize number of genes in a genome *************************************************************************************************/ __global__ void mutateIt(float *Vs, int *ptrs, const float *rands, const int pSize, const float pMut, const float max, const int genomeSize) { /* figure out index */ int i=blockIdx.x * blockDim.x + threadIdx.x; if(i<pSize){ // get index into random number array int r=i*genomeSize; i=ptrs[i]; int j=i+genomeSize; // want random numbers from [-max, max). 
will subtract max later float scale=2.0f*max/pMut; // iterate through genome while(i<j){ if(rands[r]<pMut){ // mutate the amplitude by adding perturbation Vs[i]+=rands[r]*scale-max; } ++i; ++r; } } } /************************************************************************************************ | function 3: scoreIt | * @brief calculates a score indicating the closeness of fit for each individual/chromosome (set of parameters) against the training set * @param scores score for each conformation, calculated here * @param areas weighting for each conformation, was formerly calculated here * @param Vs a global array of all the parent and child genomes * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual * @param tset training set * @param tgts targets for training * @param wts weights of each point in the training set * @param breaks breaks in training set, where different data should not be compared across breaks * @param nConf number of conformations in training set * @param pSize number of individuals in the population * @param genomeSize number of genes in a genome * @param xx space to store energy differences for each conformation with test parameters ************************************************************************************************/ __global__ void scoreIt(float *scores, float *areas, const float *Vs, const int *ptrs, const float *tset, const float *tgts, const float *wts, const int *breaks, const int nConf, const int pSize, const int genomeSize, float *xx) { int i=blockIdx.x * blockDim.x + threadIdx.x; //if((i<<1)<(pSize-1)*pSize){ if(i<pSize){ float *x=xx+i*nConf; // for the error of each conformation // get reference to score float *S=scores+i; // set score to 0 *S=0.0f; // accumulate little s for each set float s; // get first index in genome int i0=ptrs[i]; // get index of next genome space for looping bounds int j=i0+genomeSize; // start with the first element in the training set int t=0; /* start at break 0 */ int b=0; /* loop over conformations c */ int c=0; while(c<nConf){ //int nP=0; s=0.0f; /* loop only in units without break points */ while(c<breaks[b+1]){ /* start with delta E (tgts) for a given conformation (c) within a break; see load.cpp conf (c) goes through until it reach a break. 
the loop will set delta E */ x[c]=tgts[c]; /* subtract contributions from each parameter for conformation c for each conformation e.g deltaE - cos (dihedral * periodicity) * parameter generated from chromosomes */ for(i=i0;i<j;i++,t++){ x[c]-=tset[t]*Vs[i]; } /* add differences in this error from all other errors */ for(int c2=breaks[b];c2<c;c2++){ float err=x[c]-x[c2]; s+=(err<0.0f?-err:err); } /* next conformation */ ++c; } /* add little error to big error S, weighted by number of pairs */ *S+=s*wts[b]; /* go to next breakpoint */ ++b; } #if DEBUG>1 printf("areas[%d]=%f\n",i0/genomeSize,areas[i0/genomeSize]); #endif } } /************************************************************************************************** * | function 4: calcAreas | * * * * calculates the areas (the probability) each individual has of mating * *___________________________________Parameters____________________________________________________* * @param scores scores for each individual (set of parameters) * * @param areas fitness for each individual, in terms of probability of mating * * @param ptrs array of pointers from logical indices to actual indices into Vs for each individual* * @param pSize number of individuals in the population * * @param genomeSize number of genes in a genome * **************************************************************************************************/ __global__ void calcAreas(float *scores, float *areas, const int *ptrs, const int pSize, const int genomeSize) { int i=blockIdx.x * blockDim.x + threadIdx.x; if(i<pSize){ areas[ptrs[i]/genomeSize]=__expf(-scores[i]/scores[0]); } } /************************************************************************************************ * | function 5: moveEm | * * @brief simple helper function for copying data from oldF, oldI to neWF, newI * * @param newF pointer to new float array * @param newI pointer to new int array * @param oldF pointer to old float array * @param oldI pointer to old int array * @param N number of floats/ints to copy *************************************************************************************************/ __global__ void moveEm(float * newF, int *newI, float *oldF, int *oldI, int N) { int i=blockIdx.x * blockDim.x + threadIdx.x; if(i<N){ newF[i]=oldF[i]; newI[i]=oldI[i]; } } /******************************| function 5 ends |***********************************************/ /************************************************************************************************ | sumEm and sumEmIndex : helper function for getSumAreas | * @brief performs a sum of each successive pair of N numbers in source and stores the sums in sums. intended to be run multiple times to sum over a whole array. if N is odd, the last sum index will be N/2-1 and contain the sum of the last 3 numbers * * @param sums where to store the sums * @param source where to get the numbers to sum together * @param N the dimension of source * * @return */ __global__ void sumEm(float *sums, float *source, int N){ int i=blockIdx.x*blockDim.x+threadIdx.x; int j=(i<<1); if(j+3<N)sums[i]=source[j]+source[j+1]; else if(j+3==N) sums[i]=source[j]+source[j+1]+source[j+2]; else if(j+2==N) sums[i]=source[j]+source[j+1]; } /* * @brief performs a sum of pairs of N numbers in source, using locations indicated by pointers. pointers has indices multiplied by genomeSize. intended to be run multiple times to sum over a whole array. 
if N is odd, the last sum index will be N/2-1 and contain the sum of the last 3 numbers * * @param sums where to store the sums * @param source where to get the numbers to sum together * @param N the dimension of source * @param ptrs the indices to use when gathering pairs for summation * @param genomeSize the number by which the indices in ptrs are scaled * * @return */ __global__ void sumEmIndex(float *sums, float *source, int N, const int *ptrs, const int genomeSize){ int i=blockIdx.x*blockDim.x+threadIdx.x; int j=(i<<1); if(j+3<N)sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]; else if(j+3==N) sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]+source[ptrs[j+2]/genomeSize]; else if(j+2==N) sums[i]=source[ptrs[j]/genomeSize]+source[ptrs[j+1]/genomeSize]; #if DEBUG>1 if(j+2<=N)printf(" %d:%f",i,sums[i]); #endif } /*******************************| end of helper function |***************************************/ /************************************************************************************************* * | function 6: getSumAreas | * * ---------uses sumEmIndex and sumEM-------- * * * * @brief get sum of all areas * * * @param areas_d pointer to areas on device * * @param ptrs_d pointer to indices for each individual in population * @param pSize population size * @param temp_d pointer to temporary array on device * @param genomeSize number of alleles in genome ************************************************************************************************/ float *getSumAreas(float *areas_d, int *ptrs_d, int pSize, float *temp_d, const int & genomeSize){ int dim=pSize; int offset=0; /* the triple chevron below describes an execution configuration the first argument(((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE) in the execution configuration specifies the number of thread blocks in the grid, and the second specifies (BLOCK_SIZE) the number of threads in a thread block */ sumEmIndex <<<((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (temp_d, areas_d, dim, ptrs_d, genomeSize); #if DEBUG>1 std::cout << std::endl; #endif pSize >>= 1; while((dim>>=1)>1){ offset^=pSize; sumEm <<<((dim>>1)+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (temp_d+offset, temp_d+(offset^pSize), dim); #if DEBUG>1 std::cout << std::endl; #endif } return temp_d+offset; } /* /////////////////////////////////////////////////////// ` ////////////////////////////////// ` ///////////////////// | | ///////////// ~ ~ ~ ~ ~ ~ ~ //////// | | ///// ____| |____ /// | | // ___| J.M |___ / | K.B | / PROGRAM BEGINS HERE | | **************************************************************************************************/ /************************************************************************************************* argc is a vairable with the number of arguments passed to GenA argv is a vector of strings representing the the arguments the GenA takes To run genA: ./genA -p parmfile < inputfile > outputfile parameters in the parmfile psize: population size, 1000-2000 nGen: number of generations, > 100000 pMut: probability of mutation, 0.01 - 0.001 max: maximal permissible mutation, 0.5 - 0.001 pCross: probability of crossover 0.8-0.9 randomseed: sixdigit random number, upon mutation, the lowest bit of the random number is used to determine whether the amplitude or the phase shift will change. 
input file: parametersfitting data using the following format: _____________________________________________________________________ -<dihedral> <AMBER atom type for dihedral 1> | -<dihedral> <AMBER atom type for dihedral 2> | <name of data set> <dihedral 1> <dihedral 2> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | ... | / | <name of data set> <dihedral 1> <dihedral 2> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | <dihedral 1 value> <dihedral 2 value> <E_QM> <E_MM> | ... | / | _____________________________________________________________________| <dihedral> is the name of dihedral e.g phi, psi, chi1, chi2, chi3, etc <AMBER atom type for dihedral 1> e.g chi1 is N -CX-2C-2C for Met, get from frcmod file <name of data set> is any name, e.g Metalpha, Metbeta, Metcharge <dihedral 1 value> this is the dihedral value (deg) of the optimized QM structures e.g 105.62 <E_QM> the QM energy of conformation i with restraint dihedral <E_MM> the MM energy of conformation i with restraint dihedral with zeroed dihedral parameters in the frcmod ... repeat for all conformations within a break / (refer to as break (brk)) a break seperate conformations that are different e.g alpha backbone, beta backbone, charge amino acids GOODLUCK!!! [ O O ] [ b ' ] [ ----- ] contact: [email protected] with genA title for help ********************************************************************************************************/ int main(int argc, char *argv[]){ /* load genA parameters, see above */ cudaError_t error; if (!(argv[1]=="-p")) std::cout << "please use -p for param file"; ConfigFile cfg(argv[2]); // check if keys exixt if (!(cfg.keyExists("pSize"))) std::cout << "oops you forgot pSize"; //make it for others //add the rest of parameters if exist as line above // Retreive the value of keys pSize if key dont exist return value will be 1 //add new parameters here int pSize = cfg.getValueOfKey<int>("pSize", 1); std::cout << "Population Size (pSize): " << pSize << "\n\n"; int nGen = cfg.getValueOfKey<int>("nGen", 1); std::cout << "Number of Generations (nGen): " << nGen << "\n\n"; float pMut = cfg.getValueOfKey<float>("pMut", 1); std::cout << "Probability of Mutations (pMut): " << pMut << "\n\n"; float max = cfg.getValueOfKey<float>("max", 1); std::cout << "Maximal permissible mutation (max): " << max << "\n\n"; float pCross = cfg.getValueOfKey<float>("pCross", 1); std::cout << "Probability of crossover (pCross): " << pCross << "\n\n"; int rseed = cfg.getValueOfKey<int>("rseed", 1); std::cout << "Random seed (rseed): " << rseed << "\n\n"; int peng = cfg.getValueOfKey<int>("peng", 1); std::cout << "Print scores every " << peng << "generations (peng)\n\n"; int ncp = cfg.getValueOfKey<int>("ncp", 1); std::cout << "Print scores of only " << ncp << " chromosomes every peng \n\n"; /* initializing CPU variables and arrays */ int genomeSize, g, N, nConf=0, save=pSize/10; float *rands, *Vs, *tset, *tgts, *wts, *scores; int *ptrs, *breaks, nBreaks; /* initializing GPU variables and arrays */ size_t nRands; curandGenerator_t gen; int *ptrs_d, *breaks_d; float *rands_d, *Vs_d, *tset_d, *tgts_d, *wts_d, *xx_d, *scores_d, *areas_d; /*specify the string of the savefile, scorefile, loadfile name */ std::string saveFile,loadFile,scoreFile,scoreTest; /* dealing with loading the input file and save file string name */ for (int i=2;i<argc;i++){ if(i+1<argc){ if(argv[i][0]=='-'&&argv[i][1]=='r')saveFile=argv[++i]; else 
if(argv[i][0]=='-'&&argv[i][1]=='c')loadFile=argv[++i]; else if(argv[i][0]=='-'&&argv[i][1]=='s')scoreFile=argv[++i]; else if(argv[i][0]=='-'&&argv[i][1]=='f')scoreTest=argv[++i]; } } /***************************| load data from load.cpp |****************************************** * Initializing host data(Normally the 2nd step) * * check load.cpp for this section * * map is a way to create a dictionary, correction map is an array with key * ************************************************************************************************/ /* initiating container with key and values name correctionMap */ std::map<std::string,DihCorrection> correctionMap; std::cout << "Input file loaded ('_')" << std::endl; /* load in arrays generated from load.cpp, check it out for further comments & specifies the addrress, loading the variables that contain address of another variable correctionMap is ..... */ load(std::cin, &tset, &tgts, &wts, &nConf, &breaks, &nBreaks, &genomeSize, correctionMap); /*******************************| memory allocation |******************************************** *************************************************************************************************/ /* first cudaMalloc to initialize the CUDA subsystem cudaMalloc allocates size bytes of linear memory on the device and returns in *devPtr a pointer to the allocated memory. It takes two parameters: (1) devPtr - Pointer to allocated device memory e.g variable &breaks_d that have the address of the the variable breaks_d (stored in memory) (2) size - Requested allocation size in bytes, which is nBreaks */ #if DEBUG && 0 for(int i=0;i<nConf;i++){ for(int j=0;j<genomeSize;j++) std::cerr << ' ' << tset[i*genomeSize+j]; std::cerr << std::endl; } std::cerr << tgts[0] << ' ' << tgts[1] << ' ' << tgts[2] << ' ' << tgts[3] << std::endl; std::cerr << "first cudaMalloc, " << nBreaks << " breaks" << std::endl; #endif /* we are allocating space on the GPU to store four arrays (breaks_d, tgts_d, wts_d, tset_d) with size specified below. The size (# of elements the array can hold, which is directly related to memory to store each element in the array) of the array on the GPU is a lot larger than the host array at this point in the algorithm. Later we will add results to these arrays. */ cudaMalloc((void **)&breaks_d, nBreaks*sizeof(int)); cudaMalloc((void **)&tgts_d, (nBreaks-1+nConf*(1+genomeSize))*sizeof(float)); wts_d=tgts_d+nConf; tset_d=wts_d+nBreaks-1; /********************************| transfer data from host to device |**************************** * copy data arrays from CPU host (breaks,tset,tgts,wts) to device GPU (breaks_d, etc) * *************************************************************************************************/ #if DEBUG std::cerr << "COPY" << std::endl; #endif /* copying over the arrays from the CPU to GPU nbreaks is the # of dataset + 1. 
e.g if you are doing an alpha and a beta backbone set, then nbreaks=3
  genomesize is the # of fitted dihedrals * periodicities, e.g 3 dihedrals * 4 periodicities = 12
  nconf is the # of conformations you are fitting
  tgts is (E_QMi-E_MMi) + (E_MMref-E_QMref) for each conformation, so it holds nconf values, see load.cpp
  tset is the cos(dih*periodicity) for the 4 periodicities of each fitted dihedral for each conformation,
  so 20 conf will give a tset of 20 (nconf) * 12 (# of dih * periodicity) = 240 values */
  cudaMemcpy(breaks_d, breaks, nBreaks*sizeof(breaks[0]), cudaMemcpyHostToDevice);
  if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));}
  cudaMemcpy(tset_d, tset, nConf*genomeSize*sizeof(float), cudaMemcpyHostToDevice);
  if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));}
  cudaMemcpy(tgts_d, tgts, nConf*sizeof(float), cudaMemcpyHostToDevice);
  if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));}
  cudaMemcpy(wts_d, wts, (nBreaks-1)*sizeof(*wts), cudaMemcpyHostToDevice);
  if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));}
/**********************| initiate GPU blocks and # of random variable |***************************
* we need randoms, new pop 3xcrossover, genomeSizexmut                                           *
* genome size is the number of genes, which is all the parameters,                               *
* e.g for 4 periodicity and three dihedral fitting, the genomesize will be 4 * 3 = 12            *
* nRands is the number of randoms we need for each set of parameters                             *
* e.g if psize (population size) is 10, then the number of random numbers we will need is        *
* (3+(# of periodicity x # of dihedral)) * psize                                                 *
* so for 4 periodicity and 3 dihedral fitting (chi1 chi2 chi3), nRands = (3+12) * 10 = 150       *
*________________________________________________________________________________________________*
* nBlocks depends on the population size; it is used to figure out how many GPU blocks           *
* we need to initialize the arrays for calculations. Each block has 256 threads.
* * one thread represent one individual (chromosome with soln parameters) from the population * * e.g population size of 2000 will require (2000+256-1)/256 = 8.81 => 8 blocks * * * *************************************************************************************************/ nRands=(3+genomeSize)*pSize; int nBlocks=(pSize+BLOCK_SIZE-1)/BLOCK_SIZE; #ifdef DEBUG std::cerr << nRands << "nRands\n"; std::cerr << nBlocks << " blocks\n"; #endif /*******************************| initializing more host and device variables|************************ * N (bitwise operation below) is the pSize (1st input) multiply by 2; * * initiating the chromosomes which have the solns * ************************************************************************************************/ #if DEBUG printf("Allocate memory\n"); #endif rands=(float *)malloc(nRands*sizeof(float)); //cudaMalloc((void **)&rands_d, nRands*sizeof(float)); N=(pSize<<1); HANDLE_ERROR(cudaMalloc((void **)&Vs_d, (N*(genomeSize+4)+pSize*nConf+nRands)*sizeof(float))); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} rands_d=Vs_d+N*genomeSize; scores_d=rands_d+nRands; areas_d=scores_d+(N<<1); xx_d=areas_d+(N<<1); scores=(float *)malloc(sizeof(*scores)*N); float *scores_ds[2]; scores_ds[0]=scores_d; scores_ds[1]=scores_d+N; Vs=(float *)malloc(N*genomeSize*sizeof(float)); /*allocate the memory space to hold array of pointers (prts) of size N (2*pSize) these pointers point to the individuals (chromosome) in the population */ ptrs=(int *)malloc(sizeof(int)*N); ptrs[0]=0; for(g=1;g<N;g++)ptrs[g]=ptrs[g-1]+genomeSize; HANDLE_ERROR(cudaMalloc((void **)&ptrs_d, N*2*sizeof(int))); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} int *ptrs_ds[2]; ptrs_ds[0]=ptrs_d; ptrs_ds[1]=ptrs_d+N; cudaMemcpy(ptrs_d, ptrs, sizeof(int)*N, cudaMemcpyHostToDevice); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} int curList=0; #if 0 HANDLE_ERROR(cudaMalloc((void **)&scores_d, N*sizeof(float))); HANDLE_ERROR(cudaMalloc((void **)&xx_d, nOffspring*nConf*sizeof(float))); #endif /* for CUDA beginners on thrust thrust is a c++ template library for CUDA similar to STL it have two containers: thrust::host_vector<type> and thrust::device_vector<type> the containers make common operations such as cudaMalloc, cudaFree, cudaMemcpy, more concise e.g thrust::host_vector<int> vec_h(2) will allocate host vector with 2 elements thrust::device_vectore<int> vec_d = vec_h will copy host vector to device this will allow you to directly manipulate device values from the host so vec_d[0] = 5; can be done from host and once you output vector memory is automatically released std::cout << "my vector" << vec_d[0] << std::endl; it have a few algorithms, we use thrust::sort(), */ thrust::device_ptr<int> dPtrs(ptrs_d), dPtrs_save(ptrs_d+save); thrust::device_ptr<float> dScores(scores_d), dVs(Vs_d); thrust::device_ptr<float> dScores_save(scores_d+save), dScores_pSize(scores_d+pSize), dScores_N(scores_d+N); /**************************| Create a random generator |******************************************** * curandCreateGenerator takes two parameters: pointer to generator (*gen), type of generator * Once created,random number generators can be defined using the general options seed, offset,& order* When rng_type is CURAND_RNG_PSEUDO_DEFAULT, the type chosen is CURAND_RNG_PSEUDO_XORWOW * 
*__________________________________________________________________________________________________* *curandSetPseudoRandomGeneratorSeed takes two parameters (1) the generator (gen) & (2) seed value * * seed value # is used to initialize the generator and control the set of random numbers; * * same seed will the give same set of random numbers of the psuedorandom generator * * rseed is the random number specified from the 6th input) * *__________________________________________________________________________________________________* * curandGenerateNormal take 5 parameters: * * (1) generator - Generator to use * * (2) outputPtr - Pointer to device memory to store CUDA-generated results, * or Pointer to host memory to store CPU-generated resluts * * (3) num - Number of floats to generate * * (4) mean - Mean of normal distribution * * (5) stddev - Standard deviation of normal distribution * * Results are 32-bit floating point values with mean and standard deviation. * ***************************************************************************************************/ #if DEBUG printf("Create random generator\n"); #endif // create the generator name gen curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); #if DEBUG printf("Seed random generator\n"); #endif // initiate the generator with the random seed (rseed) curandSetPseudoRandomGeneratorSeed(gen, rseed); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (seed)\n", cudaGetErrorString(error));} #if DEBUG std::cerr << "GenerateNormal" << std::endl; #endif curandGenerateNormal(gen, Vs_d, N*genomeSize, 0, 1); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (normal)\n", cudaGetErrorString(error));} /***************************| END of random generator part |***************************************/ // if we have a load file copy Vs (amplitude parameters) from the loaded file and populate Vs if(!loadFile.empty()) { std::ifstream loadS(loadFile.c_str(), std::ios::in | std::ios::binary); loadS.read((char*)Vs,pSize*genomeSize*sizeof(*Vs)); cudaMemcpy(Vs_d, Vs, pSize*genomeSize*sizeof(*Vs), cudaMemcpyHostToDevice); } /* timing event */ cudaEvent_t events[3]; int nevents = (sizeof events) / (sizeof events[0]); for (int i = 0; i < nevents ; ++i) cudaEventCreate(events+i, 0); cudaEventRecord(events[0], 0); /***************************| score of the first set of chromosomes |******************************* * Here we score initial chromsomes * ***************************************************************************************************/ #if DEBUG std::cerr << "1stscore" << std::endl; #endif /* lauch first kernel to score the initial set of chromsomes (Vs_d) and output scores in scores_ds betweem the triple chervon is called the execution configuration that takes two parts 1st part takes the number of thread blocks and the second part take the number of threads in a block */ scoreIt <<<(N+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList], areas_d, Vs_d, ptrs_ds[curList], tset_d, tgts_d, wts_d, breaks_d, nConf, pSize, genomeSize, xx_d); /* score of chromosomes out of psize since we initiated 2 times psize */ scoreIt <<<(N+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize, tset_d, tgts_d, wts_d, breaks_d, nConf, pSize, genomeSize, xx_d); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (1stscore)\n", cudaGetErrorString(error));} #if DEBUG std::cerr << "1stsort" << std::endl; #endif /* sort the scores from 
each chromosome of the initial population */ thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+N), thrust::device_pointer_cast(ptrs_ds[curList])); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (1stsort)\n", cudaGetErrorString(error));} /* option to copy over scores from GPU device to CPU host */ #if DEBUG>2 cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, cudaMemcpyDeviceToHost); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} cudaMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, cudaMemcpyDeviceToHost); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} cudaMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, cudaMemcpyDeviceToHost); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} /* i is each chromosome, scores[i] is scores, Vs[ptrs[i]] is the amplitude parameters; Vs[ptrs[i]]+n specifies the next n amplitude. e.g chromosome i have genomesize amplitude parms e.g Vs[ptrs[i]]+1 is the amplitude term when the periodicity is 3 for the 1st dihedral being fitted, and Vs[ptrs[i]]+4, the amplitude term when the periodicity is 4 for the 2nd dihedral */ for(int i=0;i<N;i++){ std::cerr << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n"; } #endif cudaEventRecord(events[1], 0); /****************************| Let us begin the iterations through generations |******************** Genetic algorithm iterations through the number of generations or isolation time ****************************************************************************************************/ //std::cout << "There is " << nGen << " generations" << " and " << N << " number of chromosomes (2 x population size)" << std::endl; /* for loop for the generation */ for(g=0;g<nGen;g++){ /*************************| Step1: Generate random numbers |****************************************/ #if DEBUG>1 printf("Generate random numbers\n"); printf(" %d",g);fflush(stdout); #endif // create an array of random numbers (rands_d) used for mutations and crossover where the number of random #s is nRands curandGenerateUniform(gen, rands_d, nRands); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} /***| Step2: calculate the probabilities (areas) each individual (chromosome) has of mating |******/ #if DEBUG>2 std::cerr << "Mate" << std::endl; #endif calcAreas <<<nBlocks, BLOCK_SIZE>>> (scores_ds[curList], areas_d, ptrs_d, pSize, genomeSize); /***| Step3: mate the individuals (chromosomes,Parent[0],[1]) selected for the next generation |***/ mateIt <<<nBlocks, BLOCK_SIZE>>> (Vs_d, ptrs_ds[curList], areas_d, getSumAreas(areas_d, ptrs_ds[curList], pSize, areas_d+N, genomeSize), rands_d, pCross, pSize, genomeSize); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (mate)\n", cudaGetErrorString(error));} /*****************| Step4: mutate individuals generated after mating |*****************************/ #if DEBUG>2 std::cerr << "Mutate" << std::endl; #endif mutateIt <<<nBlocks, BLOCK_SIZE>>> (Vs_d, ptrs_ds[curList]+pSize, rands_d+pSize*3, pSize, pMut, max, genomeSize); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (mutate)\n", cudaGetErrorString(error));} /**************| Step5: Score the individuals 
to select for the next generation |*******************/ #if DEBUG>2 std::cerr << "Score" << std::endl; #endif scoreIt <<<nBlocks, BLOCK_SIZE>>> (scores_ds[curList]+pSize, areas_d, Vs_d, ptrs_ds[curList]+pSize, tset_d, tgts_d, wts_d, breaks_d, nConf, pSize, genomeSize, xx_d); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s (score)\n", cudaGetErrorString(error));} #if DEBUG>2 //std::cerr << "Display em:\n\tCopy scores" << std::endl; cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, cudaMemcpyDeviceToHost); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} //std::cerr << "\tCopy Vs" << std::endl; cudaMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, cudaMemcpyDeviceToHost); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} //std::cerr << "\tCopy ptrs" << std::endl; cudaMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, cudaMemcpyDeviceToHost); if((error=cudaGetLastError())!=cudaSuccess){fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(error));} for(int i=0;i<N;i++){ /* below you can print the scores for a chromosomes every generation */ std::cout << "This is Generation: " << g << " and Chromosome (set of parameters): " << i << std::endl; std::cout << "Score: " << scores[i] << std::endl; /* below you can print out the scores and the first four barrier parameters,since we are using 4 periodicity, the first 4 barrier parameters are for the 1st dihedral in the input file */ //std::cout << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n"; std::cerr << i << ": [" << ptrs[i] << "] = " << scores[i] << " {"<<Vs[ptrs[i]]<<" "<<Vs[ptrs[i]+1]<<" "<<Vs[ptrs[i]+2]<<" "<<Vs[ptrs[i]+3]<<"}\n"; } #endif /*****| Step6: Sort the scored chromosomes (individuals) & select for mating for next generation |**/ #if DEBUG>2 std::cerr << "Move 1" << std::endl; #endif moveEm <<<(save+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList^1], ptrs_ds[curList^1], scores_ds[curList], ptrs_ds[curList], save); #if DEBUG>2 std::cerr << "Move 2" << std::endl; #endif moveEm <<<(pSize+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList^1]+save, ptrs_ds[curList^1]+save, scores_ds[curList]+pSize, ptrs_ds[curList]+pSize, pSize);//nOffspring); #if DEBUG>2 std::cerr << "Move 3" << std::endl; #endif moveEm <<<(pSize-save+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>> (scores_ds[curList^1]+save+pSize, ptrs_ds[curList^1]+save+pSize, scores_ds[curList]+save, ptrs_ds[curList]+save, pSize-save); curList^=1; /* first sort only the ones that aren't going to be saved (elitist) */ #if DEBUG>1 std::cerr << "Selection sort (" << N << " items, less " << save << ")" << std::endl; #endif thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]+save), thrust::device_pointer_cast(scores_ds[curList]+pSize+save), thrust::device_pointer_cast(ptrs_ds[curList]+save)); /* then sort all those that fit within pSize */ #if DEBUG>1 std::cerr << "Rank sort" << std::endl; #endif thrust::sort_by_key(thrust::device_pointer_cast(scores_ds[curList]), thrust::device_pointer_cast(scores_ds[curList]+pSize), thrust::device_pointer_cast(ptrs_ds[curList])); /**************************************************************************************************** * Here you can print the score of chromosomes (total is 2 x population size) for each generation * * by uncommenting the if and end DEBUG statement, need to make this an 
input option * * such as -s which will mean print scores * ****************************************************************************************************/ //peng --> print every n generation, make a user option //ncp --> number of chromosomes to print, make a user option as well //if generation is divisable by peng if(g%peng==0) { std::ofstream scorefile; scorefile.open (scoreFile.c_str(), ios::out | ios::app); //it append to the writeout so make sure u delete scores file scorefile << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores\n"; cudaMemcpy(scores, scores_ds[curList], sizeof(*scores)*N, cudaMemcpyDeviceToHost); //cudaMemcpy(Vs, Vs_d, sizeof(*Vs)*N*genomeSize, cudaMemcpyDeviceToHost); //cudaMemcpy(ptrs, ptrs_ds[curList], sizeof(*ptrs)*N, cudaMemcpyDeviceToHost); if (ncp > pSize) { printf("Parmfile error: ncp should be smaller than psize! \n"); std::abort(); } for(int m=0;m<ncp;m++){ scorefile << std::setw(6) << g << std::setw(14) << m << std::setw(18) << scores[m] << "\n"; //scorefile << "Score: " << scores[m] << "\n"; //for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){ } scorefile.close(); } } // here the loop for generations ends cudaEventRecord(events[2], 0); /**************************************************************************************************** * TERMINATION, LAST RESULTS< SCORES AND PARAMETERS FOR EACH INDIVIDUAL ****************************************************************************************************/ /* copy over the end result from GPU to the CPU to save the scores and parameters */ cudaMemcpy(Vs, Vs_d, sizeof(float)*genomeSize*N, cudaMemcpyDeviceToHost); cudaMemcpy(ptrs, ptrs_ds[curList], sizeof(int)*N, cudaMemcpyDeviceToHost); cudaMemcpy(scores, scores_ds[curList], sizeof(float)*N, cudaMemcpyDeviceToHost); std::ofstream scoretest; scoretest.open (scoreTest.c_str(), ios::out | ios::trunc); //scoretest << "#Generation" << std::setw(14) << "Chromosomes" << std::setw(12) << "Scores\n"; for(int i=0;i<pSize;i++){ /* these are the final scores for each individual in the population, print in the output file */ //scoretest << std::setw(14) << i << std::setw(18) << scores[i] << "\n"; scoretest << std::setw(8) << scores[i] << "\n"; //std::cout << std::fixed << scores[i] << std::endl; for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){ /* second.setGenome(Vs+ptrs[i]) is the dihedral parameters for each individual in the population print in the output file */ //std::cout << it->second.setGenome(Vs+ptrs[i]); scoretest << std::setw(11) << it->second.setGenome(Vs+ptrs[i]); } } if(!saveFile.empty()){ std::ofstream saveS(saveFile.c_str(), std::ios::out | std::ios::binary); for(int i=0;i<pSize;i++) saveS.write((char *)(Vs+ptrs[i]),genomeSize*sizeof(*Vs)); } scoretest.close(); cudaEventSynchronize(events[nevents-1]); float elapsedTimeInit, elapsedTimeCompute; cudaEventElapsedTime(&elapsedTimeInit, events[0], events[1]); cudaEventElapsedTime(&elapsedTimeCompute, events[1], events[2]); std::cout << "Initialization time: " << elapsedTimeInit * 1e-3 << std::endl; std::cout << "Computation time: " << elapsedTimeCompute * 1e-3 << std::endl; #if 0 std::cout << scores[pSize] << std::endl; for(std::map<std::string,DihCorrection>::iterator it=correctionMap.begin(); it!=correctionMap.end(); ++it){ std::cout << it->second.setGenome(Vs+ptrs[pSize]); //std::cout << it->second; } #endif free(ptrs); #if 0 printf("Copy random numbers\n"); 
cudaMemcpy(rands, rands_d, nRands*sizeof(unsigned int), cudaMemcpyDeviceToHost); printf("Print random numbers\n"); printf("%d",rands[0]); for(i=1;i<nRands;i++){ printf(" %d",rands[i]); } putchar('\n'); #endif /*****************| Free up GPU Memory |*******************************************************/ curandDestroyGenerator(gen); cudaFree(Vs_d); cudaFree(ptrs_d); cudaFree(breaks_d); cudaFree(tgts_d); free(Vs); free(scores); free(rands); return 0; }
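The fitting program above never moves chromosome genomes during selection: it sorts (score, genome-offset) pairs with thrust::sort_by_key and always reads amplitudes through Vs[ptrs[i]]. A minimal standalone sketch of that indirection follows; the population size, scores, and offsets are illustrative values, not taken from the program.

// Sketch: rank chromosomes by score without moving the genomes themselves.
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <cstdio>

int main() {
  const int N = 4;                               // hypothetical number of chromosomes
  thrust::device_vector<float> scores(N);        // scores[i] = fitness of chromosome i
  thrust::device_vector<int>   ptrs(N);          // ptrs[i] = offset of its genome in Vs
  scores[0] = 3.0f; scores[1] = 1.0f; scores[2] = 4.0f; scores[3] = 2.0f;
  for (int i = 0; i < N; ++i) ptrs[i] = i * 8;   // genome of chromosome i starts at Vs[ptrs[i]]
  // sort scores ascending and carry the genome offsets along with them
  thrust::sort_by_key(scores.begin(), scores.end(), ptrs.begin());
  for (int i = 0; i < N; ++i)
    std::printf("rank %d: score %.1f genome offset %d\n", i, (float)scores[i], (int)ptrs[i]);
  // prints scores 1 2 3 4 with offsets 8 24 0 16
  return 0;
}

After the sort, ptrs[0] points at the genome of the lowest-scoring chromosome while the genome storage itself is untouched, which is what lets the elitist moveEm/sort passes above shuffle only integers and floats.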
01689c1d839bc6947517d89c2ec5e876c0a91f7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2015, Nils Moehrle * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #include "tracing.h" #include "primitives.h" texture<uint4, 1> nodes; texture<float4, 1> aabbs; texture<float4, 1> tris; CACC_NAMESPACE_BEGIN TRACING_NAMESPACE_BEGIN void bind_textures(BVHTree<DEVICE>::Data const bvh_tree) { assert(sizeof(BVHTree<DEVICE>::Node) == sizeof(uint4)); assert(sizeof(AABB) == 2 * sizeof(float4)); assert(sizeof(Tri) == 3 * sizeof(float4)); CHECK(hipBindTexture(NULL, nodes, bvh_tree.nodes_ptr, bvh_tree.num_nodes * sizeof(BVHTree<DEVICE>::Node))); CHECK(hipBindTexture(NULL, aabbs, bvh_tree.aabbs_ptr, bvh_tree.num_nodes * 2 * sizeof(float4))); CHECK(hipBindTexture(NULL, tris, bvh_tree.tris_ptr, bvh_tree.num_faces * 3 * sizeof(float4))); } __device__ __forceinline__ BVHTree<DEVICE>::Node load_node(uint idx) { BVHTree<DEVICE>::Node node; node.rllf = tex1Dfetch(nodes, idx); return node; } __device__ __forceinline__ AABB load_aabb(uint idx) { AABB aabb; float4 min = tex1Dfetch(aabbs, 2 * idx + 0); aabb.min = Vec3f(min.x, min.y, min.z); float4 max = tex1Dfetch(aabbs, 2 * idx + 1); aabb.max = Vec3f(max.x, max.y, max.z); return aabb; } __device__ __forceinline__ Tri load_tri(uint idx) { Tri tri; float4 a = tex1Dfetch(tris, 3 * idx + 0); tri.a = Vec3f(a.x, a.y, a.z); float4 b = tex1Dfetch(tris, 3 * idx + 1); tri.b = Vec3f(b.x, b.y, b.z); float4 c = tex1Dfetch(tris, 3 * idx + 2); tri.c = Vec3f(c.x, c.y, c.z); return tri; } __device__ void trace(BVHTree<DEVICE>::Data const bvh_tree, Ray const ray, uint * hit_face_id_ptr) { const int tx = threadIdx.x; float t = inf; uint hit_face_id = NAI; uint gstack[GSTACK_SIZE]; uint __shared__ sstack[SSTACK_SIZE * TRACE_BLOCK_SIZE]; uint node_idx = 0; int stack_idx = -1; while (true) { BVHTree<DEVICE>::Node node; node = load_node(node_idx); if (node.left != NAI && node.right != NAI) { float tmin_left, tmin_right; AABB aabb_left = load_aabb(node.left); bool left = intersect(ray, aabb_left, &tmin_left); AABB aabb_right = load_aabb(node.right); bool right = intersect(ray, aabb_right, &tmin_right); if (left && right) { uint other; if (tmin_left < tmin_right) { other = node.right; node_idx = node.left; } else { other = node.left; node_idx = node.right; } if (++stack_idx < SSTACK_SIZE) sstack[SSTACK_SIZE * tx + stack_idx] = other; else gstack[stack_idx] = other; } else { if (right) node_idx = node.right; if (left) node_idx = node.left; } if (!left && !right) { if (stack_idx < 0) break; if (stack_idx < SSTACK_SIZE) node_idx = sstack[SSTACK_SIZE * tx + stack_idx--]; else node_idx = gstack[stack_idx--]; } } else { for (uint i = node.first; i < node.last; ++i) { Tri tri = load_tri(i); if (intersect(ray, tri, &t)) { hit_face_id = bvh_tree.indices_ptr[i]; } } if (stack_idx < 0) break; if (stack_idx < SSTACK_SIZE) node_idx = sstack[SSTACK_SIZE * tx + stack_idx--]; else node_idx = gstack[stack_idx--]; } } *hit_face_id_ptr = hit_face_id; } TRACING_NAMESPACE_END CACC_NAMESPACE_END
01689c1d839bc6947517d89c2ec5e876c0a91f7b.cu
/* * Copyright (C) 2015, Nils Moehrle * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD 3-Clause license. See the LICENSE.txt file for details. */ #include "tracing.h" #include "primitives.h" texture<uint4, 1> nodes; texture<float4, 1> aabbs; texture<float4, 1> tris; CACC_NAMESPACE_BEGIN TRACING_NAMESPACE_BEGIN void bind_textures(BVHTree<DEVICE>::Data const bvh_tree) { assert(sizeof(BVHTree<DEVICE>::Node) == sizeof(uint4)); assert(sizeof(AABB) == 2 * sizeof(float4)); assert(sizeof(Tri) == 3 * sizeof(float4)); CHECK(cudaBindTexture(NULL, nodes, bvh_tree.nodes_ptr, bvh_tree.num_nodes * sizeof(BVHTree<DEVICE>::Node))); CHECK(cudaBindTexture(NULL, aabbs, bvh_tree.aabbs_ptr, bvh_tree.num_nodes * 2 * sizeof(float4))); CHECK(cudaBindTexture(NULL, tris, bvh_tree.tris_ptr, bvh_tree.num_faces * 3 * sizeof(float4))); } __device__ __forceinline__ BVHTree<DEVICE>::Node load_node(uint idx) { BVHTree<DEVICE>::Node node; node.rllf = tex1Dfetch(nodes, idx); return node; } __device__ __forceinline__ AABB load_aabb(uint idx) { AABB aabb; float4 min = tex1Dfetch(aabbs, 2 * idx + 0); aabb.min = Vec3f(min.x, min.y, min.z); float4 max = tex1Dfetch(aabbs, 2 * idx + 1); aabb.max = Vec3f(max.x, max.y, max.z); return aabb; } __device__ __forceinline__ Tri load_tri(uint idx) { Tri tri; float4 a = tex1Dfetch(tris, 3 * idx + 0); tri.a = Vec3f(a.x, a.y, a.z); float4 b = tex1Dfetch(tris, 3 * idx + 1); tri.b = Vec3f(b.x, b.y, b.z); float4 c = tex1Dfetch(tris, 3 * idx + 2); tri.c = Vec3f(c.x, c.y, c.z); return tri; } __device__ void trace(BVHTree<DEVICE>::Data const bvh_tree, Ray const ray, uint * hit_face_id_ptr) { const int tx = threadIdx.x; float t = inf; uint hit_face_id = NAI; uint gstack[GSTACK_SIZE]; uint __shared__ sstack[SSTACK_SIZE * TRACE_BLOCK_SIZE]; uint node_idx = 0; int stack_idx = -1; while (true) { BVHTree<DEVICE>::Node node; node = load_node(node_idx); if (node.left != NAI && node.right != NAI) { float tmin_left, tmin_right; AABB aabb_left = load_aabb(node.left); bool left = intersect(ray, aabb_left, &tmin_left); AABB aabb_right = load_aabb(node.right); bool right = intersect(ray, aabb_right, &tmin_right); if (left && right) { uint other; if (tmin_left < tmin_right) { other = node.right; node_idx = node.left; } else { other = node.left; node_idx = node.right; } if (++stack_idx < SSTACK_SIZE) sstack[SSTACK_SIZE * tx + stack_idx] = other; else gstack[stack_idx] = other; } else { if (right) node_idx = node.right; if (left) node_idx = node.left; } if (!left && !right) { if (stack_idx < 0) break; if (stack_idx < SSTACK_SIZE) node_idx = sstack[SSTACK_SIZE * tx + stack_idx--]; else node_idx = gstack[stack_idx--]; } } else { for (uint i = node.first; i < node.last; ++i) { Tri tri = load_tri(i); if (intersect(ray, tri, &t)) { hit_face_id = bvh_tree.indices_ptr[i]; } } if (stack_idx < 0) break; if (stack_idx < SSTACK_SIZE) node_idx = sstack[SSTACK_SIZE * tx + stack_idx--]; else node_idx = gstack[stack_idx--]; } } *hit_face_id_ptr = hit_face_id; } TRACING_NAMESPACE_END CACC_NAMESPACE_END
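trace() relies on an intersect(ray, aabb, &tmin) helper from primitives.h, which is not included here. A conventional ray/AABB slab test is sketched below for reference only; the struct layout and member names are assumptions for illustration, not the actual primitives.h interface.

#include <algorithm>
#include <cstdio>

struct V3 { float x, y, z; };

// Slab test: true if origin + t*dir hits the box for some t >= 0; *tmin receives the entry distance.
bool slab_intersect(V3 orig, V3 inv_dir, V3 bmin, V3 bmax, float* tmin) {
  float tx1 = (bmin.x - orig.x) * inv_dir.x, tx2 = (bmax.x - orig.x) * inv_dir.x;
  float ty1 = (bmin.y - orig.y) * inv_dir.y, ty2 = (bmax.y - orig.y) * inv_dir.y;
  float tz1 = (bmin.z - orig.z) * inv_dir.z, tz2 = (bmax.z - orig.z) * inv_dir.z;
  float lo = std::max(std::max(std::min(tx1, tx2), std::min(ty1, ty2)), std::min(tz1, tz2));
  float hi = std::min(std::min(std::max(tx1, tx2), std::max(ty1, ty2)), std::max(tz1, tz2));
  *tmin = lo;
  return hi >= lo && hi >= 0.0f;
}

int main() {
  V3 orig{0, 0, 0}, inv_dir{1.0f, 1e30f, 1e30f};           // ray along +x
  float tmin;
  bool hit = slab_intersect(orig, inv_dir, {2, -1, -1}, {3, 1, 1}, &tmin);
  std::printf("hit=%d tmin=%.1f\n", hit, tmin);             // expect hit=1 tmin=2.0
  return 0;
}

The BVH kernel only needs the entry distance tmin of each child box to decide which child to descend first and which to push onto the shared/global traversal stack.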
b942e274d5dfb108a15d3b285b32886a180e1000.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> __global__ void add2(int* a) { int i = threadIdx.x; a[i] = a[i] + 8; } int main() { const int N = 8; int a[N] = {0,2,43,21,22,45,12,23}; size_t size = N * sizeof(int); int* a_d; hipMalloc(&a_d, size); hipMemcpy(a_d, a, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(add2, dim3(1), dim3(8), 0, 0, a_d); hipMemcpy(a, a_d, size, hipMemcpyDeviceToHost); for (int i = 0; i < N; i++) { printf("a = %d\n", a[i]); } hipFree(a_d); }
b942e274d5dfb108a15d3b285b32886a180e1000.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> __global__ void add2(int *a) { int i = threadIdx.x; a[i] = a[i] + 8; } int main() { const int N = 8; int a[N] = {0,2,43,21,22,45,12,23}; size_t size = N * sizeof(int); int* a_d; cudaMalloc(&a_d, size); cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice); add2<<<1, 8>>>(a_d); cudaMemcpy(a, a_d, size, cudaMemcpyDeviceToHost); for (int i = 0; i < N; i++) { printf("a = %d\n", a[i]); } cudaFree(a_d); }
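add2 launches one block of 8 threads and adds 8 to each element of the hard-coded input {0,2,43,21,22,45,12,23}, so the expected console output of either version is:

a = 8
a = 10
a = 51
a = 29
a = 30
a = 53
a = 20
a = 31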
9c20893e4cc57401437c9c04b5e4afb784c6b238.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "dali/operators/generic/one_hot.h" #include "dali/operators/generic/one_hot.cuh" namespace dali { class OneHotGPU : public OneHot<GPUBackend> { public: explicit OneHotGPU(const OpSpec &spec) : OneHot<GPUBackend>(spec) { scratch_mem_.set_type<uint8_t>(); } ~OneHotGPU() override = default; USE_OPERATOR_MEMBERS(); protected: void RunImpl(workspace_t<GPUBackend> &ws) override; bool SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) override; template<typename OutputType, typename InputType> void RunImplTyped(workspace_t<GPUBackend> &ws, int placement_axis); private: std::vector<detail::SampleDesc> sample_descs_; Tensor<GPUBackend> scratch_mem_; int recent_n_samples_ = 0; }; bool OneHotGPU::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) { const auto &input = ws.template InputRef<GPUBackend>(0); int num_samples = input.shape().num_samples(); if (num_samples != recent_n_samples_) { recent_n_samples_ = num_samples; int64_t samples_size = num_samples * sizeof(detail::SampleDesc); scratch_mem_.Resize({samples_size}); } sample_descs_.clear(); sample_descs_.reserve(num_samples); return OneHot<GPUBackend>::SetupImpl(output_desc, ws); } void OneHotGPU::RunImpl(workspace_t<GPUBackend> &ws) { const auto &input = ws.InputRef<GPUBackend>(0); auto &output = ws.OutputRef<GPUBackend>(0); int output_sample_dim = output.shape().sample_dim(); int placement_axis = get_placement_axis(output_sample_dim); output.SetLayout(GetOutputLayout(ws, placement_axis, output_sample_dim)); TYPE_SWITCH(input.type(), type2id, InputType, ONE_HOT_TYPES, ( TYPE_SWITCH(output_type_, type2id, OutputType, ONE_HOT_TYPES, ( RunImplTyped<OutputType, InputType>(ws, placement_axis); ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT } template <typename OutputType, typename InputType> void OneHotGPU::RunImplTyped(workspace_t<GPUBackend> &ws, int axis) { const auto &input = ws.InputRef<GPUBackend>(0); auto &output = ws.OutputRef<GPUBackend>(0); int num_samples = input.shape().num_samples(); uint64_t max_out_vol = 1; const auto &shape = output.shape(); for (int sample_id = 0; sample_id < num_samples; ++sample_id) { detail::SampleDesc sample; auto output_shape = shape.tensor_shape_span(sample_id); auto outer_vol = volume(output_shape.begin(), output_shape.begin() + axis); sample.inner_vol = volume(output_shape.begin() + axis + 1, output_shape.end()); sample.inner_vol_classes = sample.inner_vol * num_classes_; sample.output_vol = outer_vol * sample.inner_vol_classes; sample.out = output.mutable_tensor<OutputType>(sample_id); sample.in = input.tensor<InputType>(sample_id); sample_descs_.push_back(sample); max_out_vol = 
::max(max_out_vol, sample.output_vol); } auto stream = ws.stream(); scratch_mem_.Copy(sample_descs_, stream); const auto *scratch_mem_gpu = scratch_mem_.data<detail::SampleDesc>(); const int block = 256; auto grid = detail::gridHelper(max_out_vol, num_samples, block); hipLaunchKernelGGL(( detail::PopulateOneHot<OutputType, InputType>), dim3(grid), dim3(block), 0, stream, on_value_, off_value_, scratch_mem_gpu); } DALI_REGISTER_OPERATOR(OneHot, OneHotGPU, GPU); } // namespace dali
9c20893e4cc57401437c9c04b5e4afb784c6b238.cu
// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "dali/operators/generic/one_hot.h" #include "dali/operators/generic/one_hot.cuh" namespace dali { class OneHotGPU : public OneHot<GPUBackend> { public: explicit OneHotGPU(const OpSpec &spec) : OneHot<GPUBackend>(spec) { scratch_mem_.set_type<uint8_t>(); } ~OneHotGPU() override = default; USE_OPERATOR_MEMBERS(); protected: void RunImpl(workspace_t<GPUBackend> &ws) override; bool SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) override; template<typename OutputType, typename InputType> void RunImplTyped(workspace_t<GPUBackend> &ws, int placement_axis); private: std::vector<detail::SampleDesc> sample_descs_; Tensor<GPUBackend> scratch_mem_; int recent_n_samples_ = 0; }; bool OneHotGPU::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace_t<GPUBackend> &ws) { const auto &input = ws.template InputRef<GPUBackend>(0); int num_samples = input.shape().num_samples(); if (num_samples != recent_n_samples_) { recent_n_samples_ = num_samples; int64_t samples_size = num_samples * sizeof(detail::SampleDesc); scratch_mem_.Resize({samples_size}); } sample_descs_.clear(); sample_descs_.reserve(num_samples); return OneHot<GPUBackend>::SetupImpl(output_desc, ws); } void OneHotGPU::RunImpl(workspace_t<GPUBackend> &ws) { const auto &input = ws.InputRef<GPUBackend>(0); auto &output = ws.OutputRef<GPUBackend>(0); int output_sample_dim = output.shape().sample_dim(); int placement_axis = get_placement_axis(output_sample_dim); output.SetLayout(GetOutputLayout(ws, placement_axis, output_sample_dim)); TYPE_SWITCH(input.type(), type2id, InputType, ONE_HOT_TYPES, ( TYPE_SWITCH(output_type_, type2id, OutputType, ONE_HOT_TYPES, ( RunImplTyped<OutputType, InputType>(ws, placement_axis); ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT } template <typename OutputType, typename InputType> void OneHotGPU::RunImplTyped(workspace_t<GPUBackend> &ws, int axis) { const auto &input = ws.InputRef<GPUBackend>(0); auto &output = ws.OutputRef<GPUBackend>(0); int num_samples = input.shape().num_samples(); uint64_t max_out_vol = 1; const auto &shape = output.shape(); for (int sample_id = 0; sample_id < num_samples; ++sample_id) { detail::SampleDesc sample; auto output_shape = shape.tensor_shape_span(sample_id); auto outer_vol = volume(output_shape.begin(), output_shape.begin() + axis); sample.inner_vol = volume(output_shape.begin() + axis + 1, output_shape.end()); sample.inner_vol_classes = sample.inner_vol * num_classes_; sample.output_vol = outer_vol * sample.inner_vol_classes; sample.out = output.mutable_tensor<OutputType>(sample_id); sample.in = input.tensor<InputType>(sample_id); sample_descs_.push_back(sample); max_out_vol = std::max(max_out_vol, sample.output_vol); } auto stream = ws.stream(); 
scratch_mem_.Copy(sample_descs_, stream); const auto *scratch_mem_gpu = scratch_mem_.data<detail::SampleDesc>(); const int block = 256; auto grid = detail::gridHelper(max_out_vol, num_samples, block); detail::PopulateOneHot<OutputType, InputType><<<grid, block, 0, stream>>>( on_value_, off_value_, scratch_mem_gpu); } DALI_REGISTER_OPERATOR(OneHot, OneHotGPU, GPU); } // namespace dali
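The volumes computed per sample in RunImplTyped determine where each one-hot row lands in the flattened output. A small worked example on the host; the sample shape, axis, and class count are made-up illustration values, not DALI defaults.

#include <cstdio>
int main() {
  const long long shape[3] = {4, 10, 6};                  // output sample shape, class axis at position 1
  const int num_classes = 10;
  long long outer_vol = shape[0];                         // volume of dims before the axis  -> 4
  long long inner_vol = shape[2];                         // volume of dims after the axis   -> 6
  long long inner_vol_classes = inner_vol * num_classes;  // stride of one "outer" slice     -> 60
  long long output_vol = outer_vol * inner_vol_classes;   // elements written per sample     -> 240
  std::printf("%lld %lld %lld %lld\n", outer_vol, inner_vol, inner_vol_classes, output_vol);
  return 0;
}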
609f65771bd0643bb632ce4cb3a30becd6b8f005.hip
// !!! This is a file automatically generated by hipify!!! // Elliott Esponda & Andrew Wheeler // (1.) Copied #include <cstdlib> #include <cmath> #include <sys/time.h> #include "cs43805351.h" #include <hip/hip_runtime.h> // (2.) Done static const float Delta = 0.004f; static const float xMid = 0.2389f; static const float yMid = 0.55267f; static const int ThreadsPerBlock = 512; // (3.) Done // (4.) meet fractalKernel static __global__ void fractalKernel(const int width, const int frames, unsigned char* pic) { // compute frames const int pixels = frames * width * width; const int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < pixels) // (6.) Don't use excess { // (5.) Bye loops, hello constants const int frame = idx / (width * width); const int row = (idx / width) % width; const int col = idx % width; //for (int frame = 0; frame < frames; frame++) { const float delta = Delta * powf(0.98f, frame); const float xMin = xMid - delta; const float yMin = yMid - delta; const float dw = 2.0f * delta / width; //for (int row = 0; row < width; row++) { const float cy = yMin + row * dw; //for (int col = 0; col < width; col++) { const float cx = xMin + col * dw; float x = cx; float y = cy; int depth = 256; float x2, y2; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0f)); pic[frame * width * width + row * width + col] = (unsigned char)depth; } } static void CheckCuda() { hipError_t e; hipDeviceSynchronize(); if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, hipGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Fractal v1.7\n"); // check command line if (argc != 3) {fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1);} const int width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "error: frame_width must be at least 10\n"); exit(-1);} const int frames = atoi(argv[2]); if (frames < 1) {fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1);} printf("computing %d frames of %d by %d fractal\n", frames, width, width); // allocate picture array (host copies) unsigned char* pic = new unsigned char[frames * width * width]; // alloc space for device copy of pic (7.) const int N = frames * width * width; unsigned char * d_pic; const int size = N * sizeof(unsigned char); hipMalloc((void **)&d_pic, size); // copy inputs to device if (hipSuccess != hipMemcpy(d_pic, pic, size, hipMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // launch GPU kernel (8.) hipLaunchKernelGGL(( fractalKernel), dim3((N + ThreadsPerBlock - 1) / ThreadsPerBlock), dim3(ThreadsPerBlock), 0, 0, width, frames, d_pic); hipDeviceSynchronize(); // (9.) Called // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.3f s\n", runtime); CheckCuda(); // (10.) CheckCuda // copy result back to host if (hipSuccess != hipMemcpy(pic, d_pic, size, hipMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} // verify result by writing frames to BMP files if ((width <= 256) && (frames <= 100)) { for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "fractal%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } } delete [] pic; hipFree(d_pic); return 0; }
609f65771bd0643bb632ce4cb3a30becd6b8f005.cu
// Elliott Esponda & Andrew Wheeler // (1.) Copied #include <cstdlib> #include <cmath> #include <sys/time.h> #include "cs43805351.h" #include <cuda.h> // (2.) Done static const float Delta = 0.004f; static const float xMid = 0.2389f; static const float yMid = 0.55267f; static const int ThreadsPerBlock = 512; // (3.) Done // (4.) meet fractalKernel static __global__ void fractalKernel(const int width, const int frames, unsigned char* pic) { // compute frames const int pixels = frames * width * width; const int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < pixels) // (6.) Don't use excess { // (5.) Bye loops, hello constants const int frame = idx / (width * width); const int row = (idx / width) % width; const int col = idx % width; //for (int frame = 0; frame < frames; frame++) { const float delta = Delta * powf(0.98f, frame); const float xMin = xMid - delta; const float yMin = yMid - delta; const float dw = 2.0f * delta / width; //for (int row = 0; row < width; row++) { const float cy = yMin + row * dw; //for (int col = 0; col < width; col++) { const float cx = xMin + col * dw; float x = cx; float y = cy; int depth = 256; float x2, y2; do { x2 = x * x; y2 = y * y; y = 2 * x * y + cy; x = x2 - y2 + cx; depth--; } while ((depth > 0) && ((x2 + y2) < 5.0f)); pic[frame * width * width + row * width + col] = (unsigned char)depth; } } static void CheckCuda() { cudaError_t e; cudaDeviceSynchronize(); if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e)); exit(-1); } } int main(int argc, char *argv[]) { printf("Fractal v1.7\n"); // check command line if (argc != 3) {fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1);} const int width = atoi(argv[1]); if (width < 10) {fprintf(stderr, "error: frame_width must be at least 10\n"); exit(-1);} const int frames = atoi(argv[2]); if (frames < 1) {fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1);} printf("computing %d frames of %d by %d fractal\n", frames, width, width); // allocate picture array (host copies) unsigned char* pic = new unsigned char[frames * width * width]; // alloc space for device copy of pic (7.) const int N = frames * width * width; unsigned char * d_pic; const int size = N * sizeof(unsigned char); cudaMalloc((void **)&d_pic, size); // copy inputs to device if (cudaSuccess != cudaMemcpy(d_pic, pic, size, cudaMemcpyHostToDevice)) {fprintf(stderr, "copying to device failed\n"); exit(-1);} // start time timeval start, end; gettimeofday(&start, NULL); // launch GPU kernel (8.) fractalKernel<<<(N + ThreadsPerBlock - 1) / ThreadsPerBlock, ThreadsPerBlock>>>(width, frames, d_pic); cudaDeviceSynchronize(); // (9.) Called // end time gettimeofday(&end, NULL); const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0; printf("compute time: %.3f s\n", runtime); CheckCuda(); // (10.) CheckCuda // copy result back to host if (cudaSuccess != cudaMemcpy(pic, d_pic, size, cudaMemcpyDeviceToHost)) {fprintf(stderr, "copying from device failed\n"); exit(-1);} // verify result by writing frames to BMP files if ((width <= 256) && (frames <= 100)) { for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "fractal%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } } delete [] pic; cudaFree(d_pic); return 0; }
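fractalKernel assigns one thread per output pixel and decodes frame, row, and column from the flat index. The same arithmetic checked on the host, with made-up width/frames values and one sample index:

#include <cstdio>
int main() {
  const int width = 200, frames = 2;            // illustration values only
  const int idx = 48273;                        // any index < frames * width * width
  const int frame = idx / (width * width);      // 1
  const int row   = (idx / width) % width;      // 41
  const int col   = idx % width;                // 73
  std::printf("frame=%d row=%d col=%d recombined=%d\n",
              frame, row, col, frame * width * width + row * width + col); // recombined == idx
  return 0;
}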
098f3fe7c506e8bc3a4079ac74ce36dfb760c5cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_xvel_minus_4_right; int xdim0_update_halo_kernel2_xvel_minus_4_right_h = -1; __constant__ int ydim0_update_halo_kernel2_xvel_minus_4_right; int ydim0_update_halo_kernel2_xvel_minus_4_right_h = -1; __constant__ int xdim1_update_halo_kernel2_xvel_minus_4_right; int xdim1_update_halo_kernel2_xvel_minus_4_right_h = -1; __constant__ int ydim1_update_halo_kernel2_xvel_minus_4_right; int ydim1_update_halo_kernel2_xvel_minus_4_right_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_xvel_minus_4_right*(y)+xdim0_update_halo_kernel2_xvel_minus_4_right*ydim0_update_halo_kernel2_xvel_minus_4_right*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_xvel_minus_4_right*(y)+xdim1_update_halo_kernel2_xvel_minus_4_right*ydim1_update_halo_kernel2_xvel_minus_4_right*(z)) //user function __device__ inline void update_halo_kernel2_xvel_minus_4_right(double *xvel0, double *xvel1, const int* fields) { if(fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0,0,0)] = -xvel0[OPS_ACC0(-4,0,0)]; if(fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0,0,0)] = -xvel1[OPS_ACC1(-4,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_xvel_minus_4_right( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_xvel_minus_4_right + idx_z * 1 * xdim0_update_halo_kernel2_xvel_minus_4_right * ydim0_update_halo_kernel2_xvel_minus_4_right; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_xvel_minus_4_right + idx_z * 1 * xdim1_update_halo_kernel2_xvel_minus_4_right * ydim1_update_halo_kernel2_xvel_minus_4_right; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_xvel_minus_4_right(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_xvel_minus_4_right(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(59,"update_halo_kernel2_xvel_minus_4_right"); OPS_kernels[59].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != 
xdim0_update_halo_kernel2_xvel_minus_4_right_h || ydim0 != ydim0_update_halo_kernel2_xvel_minus_4_right_h || xdim1 != xdim1_update_halo_kernel2_xvel_minus_4_right_h || ydim1 != ydim1_update_halo_kernel2_xvel_minus_4_right_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_xvel_minus_4_right, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_xvel_minus_4_right_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_xvel_minus_4_right, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_xvel_minus_4_right_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel2_xvel_minus_4_right, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_xvel_minus_4_right_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_xvel_minus_4_right, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_xvel_minus_4_right_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[59].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_minus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[59].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[59].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[59].transfer += ops_compute_transfer(dim, range, &arg1); }
098f3fe7c506e8bc3a4079ac74ce36dfb760c5cf.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_xvel_minus_4_right; int xdim0_update_halo_kernel2_xvel_minus_4_right_h = -1; __constant__ int ydim0_update_halo_kernel2_xvel_minus_4_right; int ydim0_update_halo_kernel2_xvel_minus_4_right_h = -1; __constant__ int xdim1_update_halo_kernel2_xvel_minus_4_right; int xdim1_update_halo_kernel2_xvel_minus_4_right_h = -1; __constant__ int ydim1_update_halo_kernel2_xvel_minus_4_right; int ydim1_update_halo_kernel2_xvel_minus_4_right_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_xvel_minus_4_right*(y)+xdim0_update_halo_kernel2_xvel_minus_4_right*ydim0_update_halo_kernel2_xvel_minus_4_right*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_xvel_minus_4_right*(y)+xdim1_update_halo_kernel2_xvel_minus_4_right*ydim1_update_halo_kernel2_xvel_minus_4_right*(z)) //user function __device__ inline void update_halo_kernel2_xvel_minus_4_right(double *xvel0, double *xvel1, const int* fields) { if(fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0,0,0)] = -xvel0[OPS_ACC0(-4,0,0)]; if(fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0,0,0)] = -xvel1[OPS_ACC1(-4,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_xvel_minus_4_right( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_xvel_minus_4_right + idx_z * 1 * xdim0_update_halo_kernel2_xvel_minus_4_right * ydim0_update_halo_kernel2_xvel_minus_4_right; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_xvel_minus_4_right + idx_z * 1 * xdim1_update_halo_kernel2_xvel_minus_4_right * ydim1_update_halo_kernel2_xvel_minus_4_right; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_xvel_minus_4_right(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_xvel_minus_4_right(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(59,"update_halo_kernel2_xvel_minus_4_right"); OPS_kernels[59].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_xvel_minus_4_right_h || ydim0 != 
ydim0_update_halo_kernel2_xvel_minus_4_right_h || xdim1 != xdim1_update_halo_kernel2_xvel_minus_4_right_h || ydim1 != ydim1_update_halo_kernel2_xvel_minus_4_right_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_xvel_minus_4_right, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_xvel_minus_4_right_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_xvel_minus_4_right, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_xvel_minus_4_right_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_xvel_minus_4_right, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_xvel_minus_4_right_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_xvel_minus_4_right, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_xvel_minus_4_right_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[59].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_xvel_minus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[59].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[59].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[59].transfer += ops_compute_transfer(dim, range, &arg1); }
badb198458c0f8e4880eb0642b423d6aacb9b301.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6) { comp = -1.6744E1f / powf(atanf(atanf((var_1 - var_2 / var_3 + sinf(tanhf((var_4 + var_5 + var_6 * +1.5095E-44f)))))), +1.8811E34f); printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7); hipDeviceSynchronize(); return 0; }
badb198458c0f8e4880eb0642b423d6aacb9b301.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6) { comp = -1.6744E1f / powf(atanf(atanf((var_1 - var_2 / var_3 + sinf(tanhf((var_4 + var_5 + var_6 * +1.5095E-44f)))))), +1.8811E34f); printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7); cudaDeviceSynchronize(); return 0; }
1b2d675ac21f96af33e1730aab76e5c6f802f076.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> __global__ void scan_kernel_1(double const *X, double *Y, int N, double *carries) { __shared__ double shared_buffer[256]; double my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(double *carries) { __shared__ double shared_buffer[256]; // load data: double my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(double *Y, int N, double const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ double shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } void exclusive_scan(double const * input, double * output, int N) { int num_blocks = 256; int threads_per_block = 256; double *carries; hipMalloc(&carries, sizeof(double) * num_blocks); // First step: Scan within each thread group and write carries hipLaunchKernelGGL(( scan_kernel_1), dim3(num_blocks), dim3(threads_per_block), 0, 0, input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group) hipLaunchKernelGGL(( scan_kernel_2), dim3(1), dim3(num_blocks), 0, 0, carries); // Third step: Offset each thread group accordingly hipLaunchKernelGGL(( scan_kernel_3), dim3(num_blocks), dim3(threads_per_block), 0, 0, output, N, carries); hipFree(carries); } int main() { int N = 200; // // Allocate host arrays for reference // double *x = (double *)malloc(sizeof(double) * N); double *y = (double *)malloc(sizeof(double) * N); double *z = (double *)malloc(sizeof(double) * N); std::fill(x, x + N, 1); // reference calculation: y[0] = 0; for (std::size_t i=1; i<N; ++i) y[i] = y[i-1] + x[i-1]; // // Allocate CUDA-arrays // double *cuda_x, *cuda_y; hipMalloc(&cuda_x, sizeof(double) * N); hipMalloc(&cuda_y, sizeof(double) * N); hipMemcpy(cuda_x, x, sizeof(double) * N, hipMemcpyHostToDevice); // Perform the exclusive scan and obtain results exclusive_scan(cuda_x, cuda_y, N); hipMemcpy(z, cuda_y, sizeof(double) * N, hipMemcpyDeviceToHost); // // Print first few entries for reference // std::cout << "CPU y: "; for (int i=0; i<10; ++i) std::cout << y[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << y[i] << " "; std::cout << std::endl; std::cout << "GPU y: "; for (int i=0; i<10; ++i) std::cout << z[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << z[i] << " "; std::cout << std::endl; // // Clean up: // free(x); free(y); free(z); hipFree(cuda_x); hipFree(cuda_y); return EXIT_SUCCESS; }
1b2d675ac21f96af33e1730aab76e5c6f802f076.cu
#include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> __global__ void scan_kernel_1(double const *X, double *Y, int N, double *carries) { __shared__ double shared_buffer[256]; double my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(double *carries) { __shared__ double shared_buffer[256]; // load data: double my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(double *Y, int N, double const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ double shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } void exclusive_scan(double const * input, double * output, int N) { int num_blocks = 256; int threads_per_block = 256; double *carries; cudaMalloc(&carries, sizeof(double) * num_blocks); // First step: Scan within each thread group and write carries scan_kernel_1<<<num_blocks, threads_per_block>>>(input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group) scan_kernel_2<<<1, num_blocks>>>(carries); // Third step: Offset each thread group accordingly scan_kernel_3<<<num_blocks, threads_per_block>>>(output, N, carries); cudaFree(carries); } int main() { int N = 200; // // Allocate host arrays for reference // double *x = (double *)malloc(sizeof(double) * N); double *y = (double *)malloc(sizeof(double) * N); double *z = (double *)malloc(sizeof(double) * N); std::fill(x, x + N, 1); // reference calculation: y[0] = 0; for (std::size_t i=1; i<N; ++i) y[i] = y[i-1] + x[i-1]; // // Allocate CUDA-arrays // double *cuda_x, *cuda_y; cudaMalloc(&cuda_x, sizeof(double) * N); cudaMalloc(&cuda_y, sizeof(double) * N); cudaMemcpy(cuda_x, x, sizeof(double) * N, cudaMemcpyHostToDevice); // Perform the exclusive scan and obtain results exclusive_scan(cuda_x, cuda_y, N); cudaMemcpy(z, cuda_y, sizeof(double) * N, cudaMemcpyDeviceToHost); // // Print first few entries for reference // std::cout << "CPU y: "; for (int i=0; i<10; ++i) std::cout << y[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << y[i] << " "; std::cout << std::endl; std::cout << "GPU y: "; for (int i=0; i<10; ++i) std::cout << z[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << z[i] << " "; std::cout << std::endl; // // Clean up: // free(x); free(y); free(z); cudaFree(cuda_x); cudaFree(cuda_y); return EXIT_SUCCESS; }
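exclusive_scan() composes three kernels: a per-block exclusive scan that records each block's total as a carry, an exclusive scan of the carries, and a pass that adds each block's carry back as an offset. The same three phases on the host, with a made-up 8-element input and a block size of 4:

#include <cstdio>
int main() {
  const int N = 8, B = 4;                        // two "blocks" of four elements
  double x[N] = {3, 1, 4, 1, 5, 9, 2, 6}, y[N], carries[N / B];
  for (int b = 0; b < N / B; ++b) {              // phase 1: scan each block, record its sum
    double running = 0;
    for (int i = 0; i < B; ++i) { y[b * B + i] = running; running += x[b * B + i]; }
    carries[b] = running;
  }
  double offset = 0;                             // phase 2: exclusive scan of the carries
  for (int b = 0; b < N / B; ++b) { double c = carries[b]; carries[b] = offset; offset += c; }
  for (int b = 0; b < N / B; ++b)                // phase 3: add each block's offset
    for (int i = 0; i < B; ++i) y[b * B + i] += carries[b];
  for (int i = 0; i < N; ++i) std::printf("%g ", y[i]);   // 0 3 4 8 9 14 23 25
  std::printf("\n");
  return 0;
}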
8c86d0969a59113e2edeae8a4708b5c73fcb5473.hip
// !!! This is a file automatically generated by hipify!!! /* For DIRECTED GRAPH */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <iostream> #include <vector> #include <unordered_map> #include <string> #include <algorithm> /***all macros**/ #define MAX_NODE 100000000 #define DEBUG 1 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /**all type declaration***/ using namespace std; class Node{ public: unsigned int val; vector<unsigned int> weights; vector<Node*> Edges; Node(int val){ this->val = val; } void addEdge(Node* v,unsigned int w){ this->Edges.push_back(v); this->weights.push_back(w); } }; /***function declarations***/ void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c); void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ); void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges); void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight); void check_del_path(int u, int v,vector<int> Path, bool& flag); void check_cycle(int N,int* parent); void computeTime(float& time,hipEvent_t start, hipEvent_t stop); /**** device Code *******/ __device__ volatile int Cx[MAX_NODE]; __device__ volatile int PQ[MAX_NODE]; //K in parallel __global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<K && PQ_size[id]>0){ //extract min from PQ int front = id* ( (N+K-1)/K ); int node = PQ[front]; // restructure the heap PQ[front]=PQ[front+PQ_size[id]-1]; PQ_size[id]-=1; int pqIndex = 0; while(2*pqIndex+1 < PQ_size[id]){ if(2*pqIndex+2 >= PQ_size[id]){ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else break; } else{ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ int swap = PQ[front + 2*pqIndex+2]; PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+2; } else{ break; } } } //removed from openList openList[node] = -1; //added to expand next int len = atomicAdd(expandNodes_size,1); expandNodes[len]=node; } } //for K in parallel __global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent, int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList, int N,int E, int K,int dest,int* nVFlag,int* PQ_size, int flagDiff,int* diff_off,int* diff_edge,int* diff_weight,int dE ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id< *expandNodes_size ){ int node = expandNodes[id]; //reach dest if(node == dest){ atomicOr(flagfound,1); } // expand int start = off[node]; int end = E; if(node!=N-1) 
end = off[node+1]; while(start < end){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(leaveLoop==false){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } //diff expand if(flagDiff){ start = diff_off[node]; end = dE; if(node!=N-1) end = diff_off[node+1]; while(start<end){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } } //end diff }//end } //K in parallel -- O(N) __global__ void keepHeapPQ(int* PQ_size,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0){ int front = id*( (N+K-1)/K ); int size = PQ_size[id]; for(int i=front;i<front+size;i++){ if(2*i+2 < front+size){ int cost = Cx[PQ[i]]; int costLeft = Cx[PQ[2*i+1]]; int costRight = Cx[PQ[2*i+2]]; if( cost > costLeft || cost > costRight ){ int index ; if(costLeft <= costRight) index = 2*i+1; else index = 2*i+2; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } else if(2*i+1 < front+size){ if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){ int index = 2*i+1; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } } } } //N threads __global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ if(nextFlag[id]==1){ int index = atomicAdd(nvSize,1); nextV[index]=id; } } } //for K in parallel __global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K){ int front = id*( (N+K-1)/K ); int i = id; while(i<*nVsize){ //if not already present if(openList[nextV[i]]!=-1){ i+=K; continue; } PQ[front+PQS[id]]= nextV[i]; PQS[id]+=1; //add in openList openList[nextV[i]] = id; if(PQS[id]>1){ int index = PQS[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } i += K; } } } //for K in parallel __global__ void checkMIN(int* PQ_size,int* flagEnd,int dest,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0 ){ int front = id* ( (N+K-1)/K ); int node = PQ[front]; //check if atleast one min, dont end the a* if( Cx[node] < Cx[dest] ){ atomicAnd(flagEnd,0); } } } __global__ void propogateDel(int* delEdgesV,int delEdge,int* rev_offset,int* rev_edges,unsigned int* rev_weight,int N,int E, 
int* Hx,int* parent,int* parent_old,int* lock,int* addFlag){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<delEdge){ int node = delEdgesV[id]; //check for the parent and add to nextflag and update the cost int start = rev_offset[node]; int end = E; if(node!=N-1) end = rev_offset[node+1]; //no parent // write in parent read always from old_parent parent[node] = -1; Cx[node]=INT_MAX; addFlag[node]=1; //if any parent can change the cost while(start< end){ int p = rev_edges[start]; //del edges if(p<0){ start++; continue; } int weight = rev_weight[start]; int flag_cycle = false; //check parent doesn't contain node int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==node){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } //no need to lock only single parent so only one node in array so one node per thread if(!flag_cycle && Cx[p]!=INT_MAX && Cx[node] > (Cx[p]-Hx[p])+weight+Hx[node] ){ Cx[node] = (Cx[p]-Hx[p] )+weight+Hx[node]; parent[node] = p; } start++; } } } //add inserted edges to propogate __global__ void propogateAdd(int* diff_off, int* diff_edges,unsigned int* diff_W,int* Hx,int* addFlag, int* lock, int* parent, int* parent_old, int N, int dE){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ int node = id; int start = diff_off[node]; int end = dE; if(node!=N-1) end = diff_off[node+1]; while(start < end ){ int child = diff_edges[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section bool flag_cycle = false; int ancestor = node; while(ancestor > 0){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } /* if(flag_cycle){ printf("Add %d->%d,%d:%d::%d\n",node,child,Cx[node],Cx[child],(Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]); if(Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]){ int ancestor = node; while(ancestor > 0){ if(ancestor==child){ printf("%d:%d\n",ancestor,Cx[ancestor]); break; } printf("%d:%d::%d ",ancestor,Cx[ancestor],parent[ancestor]); ancestor = parent_old[ancestor]; } } }*/ if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]; parent[child] = node; __threadfence(); addFlag[child]=1; } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } } } //propogate the change __global__ void propogate(int* nodes, int* size, int* off, int* edge,unsigned int* W,int* Hx, int N,int E, int* lock, int* parent,int* parent_old,int* addFlag, int* diff_off,int* diff_edge,unsigned int* diff_W,int dE, int* rev_offset,int* rev_edges,unsigned int* rev_weight, int* rev_diff_offset,int* rev_diff_edges,unsigned int* rev_diff_weight){ int id = blockIdx.x*blockDim.x+threadIdx.x; // printf("Entering %d\n",id); if(id < *size){ int node = nodes[id]; int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start < end ){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } bool flag_cycle_insert = false; int optimal_parent = node; while(optimal_parent > 0){ if(optimal_parent == child){ flag_cycle_insert = true; break; } optimal_parent = parent_old[optimal_parent]; } if(!flag_cycle_insert){ //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(!leaveLoop){ 
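// note: per-child spinlock: atomicExch returns the previous lock value, so 0 means this thread just acquired lock[child] and may safely update Cx[child] and parent[child]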
if(atomicExch(&lock[child],1)==0){ //critical section if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; addFlag[child]=1; } else if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){ //use back edges int rstart = rev_offset[child]; int rend = E; if(child!=N-1) rend = rev_offset[child+1]; //there is always one parent that is node. Cx[child] = INT_MAX; parent[child]=-1; while(rstart < rend){ int p = rev_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_weight[rstart]; bool flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor > 0){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } //newly added backedges rstart = rev_diff_offset[child]; rend = dE; if(child!=N-1) rend = rev_diff_offset[child+1]; while(rstart < rend){ int p = rev_diff_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_diff_weight[rstart]; int flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } addFlag[child]=1; } //end critical section leaveLoop = true; atomicExch(&lock[child],0); } __syncthreads(); } } start++; } start = diff_off[node]; end = dE; if(node!=N-1) end = diff_off[node+1]; while(start < end ){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } bool flag_cycle_insert = false; int optimal_parent = node; while(optimal_parent > 0){ if(optimal_parent == child){ flag_cycle_insert = true; break; } optimal_parent = parent_old[optimal_parent]; } if(!flag_cycle_insert){ //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]; __threadfence(); parent[child] = node; addFlag[child]=1; }else if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){ //use back edges int rstart = rev_offset[child]; int rend = E; if(child!=N-1) rend = rev_offset[child+1]; //there is always one parent that is node. 
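// note: the child's recorded parent is no longer valid or optimal, so reset its cost and rebuild it from the remaining in-edges (reverse CSR below, then the reverse diff CSR), skipping candidates that would introduce a parent cycle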
Cx[child] = INT_MAX; parent[child]=-1; while(rstart < rend){ int p = rev_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_weight[rstart]; int flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==child) flag_cycle = true; ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } rstart = rev_diff_offset[child]; rend = dE; if(child!=N-1) rend = rev_diff_offset[child+1]; while(rstart < rend){ int p = rev_diff_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_diff_weight[rstart]; int flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } addFlag[child]=1; } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } } start++; } } } //do in 1 thread __global__ void insertDest(int* PQ_size, int dest,int* openList){ int id = 0; int front = 0; if(openList[dest]==-1){ PQ[front+PQ_size[id]]= dest; PQ_size[id]+=1; //add in openList openList[dest] = id; if(PQ_size[id]>1){ int index = PQ_size[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } } } __global__ void getCx(int dest,int* val){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id==0){ *val = Cx[dest]; } } /**** main function ****/ int main(){ //the K PQ int K ; scanf("%d\n",&K); int startNode,endNode; scanf("%d %d",&startNode,&endNode); FILE* fgraph = fopen("graph.txt","r"); FILE* fgraph_rev = fopen("graph_op.txt","r"); int N,E; fscanf(fgraph_rev,"%d %d\n",&N,&E); fscanf(fgraph,"%d %d\n",&N,&E); int* H_offset = (int*)malloc(sizeof(int)*N); int* H_edges = (int*)malloc(sizeof(int)*E); unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); int* H_hx = (int*)malloc(sizeof(int)*N); int* H_cx = (int*)malloc(sizeof(int)*N); int* H_parent = (int*)malloc(sizeof(int)*N); int* H_PQ = (int*)malloc(sizeof(int)*N); int* H_openList = (int*)malloc(sizeof(int)*N); int* H_PQ_size = (int*)malloc(sizeof(int)*K); //for reverse graph int* H_rev_edges = (int*)malloc(sizeof(int)*E); int* H_rev_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_rev_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); //for cost of endNode int* H_dest_cost = (int*)malloc(sizeof(int)); memset(H_PQ_size,0,sizeof(int)*K); memset(H_openList,-1,sizeof(int)*N); //init cx for(int i=0;i<N;i++){ H_cx[i]=INT_MAX; H_parent[i]=-1; } for(int i=0;i<E;i++){ fscanf(fgraph,"%d",&H_edges[i]); fscanf(fgraph_rev,"%d",&H_rev_edges[i]); } for(int i=0;i<N;i++){ fscanf(fgraph,"%d",&H_offset[i]); fscanf(fgraph_rev,"%d",&H_rev_offset[i]); } for(int i=0;i<E;i++){ fscanf(fgraph,"%u",&H_weight[i]); fscanf(fgraph_rev,"%u",&H_rev_weight[i]); } FILE* fhx = fopen("Hx.txt","r"); for(int i=0;i<N;i++){ int temp; fscanf(fhx,"%d",&temp); if(temp!=-1) H_hx[i]= temp; else H_hx[i] = 0; //to change } fclose(fgraph); fclose(fhx); fclose(fgraph_rev); printf("[INFO] completed taking input\n"); //init Host var int* H_flagEnd = (int*)malloc(sizeof(int)); int* H_flagfound = (int*)malloc(sizeof(int)); 
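// note: H_a0 stays 0 on the host; it is copied over D_nV_size and D_expandNodes_size to reset those device counters between iterations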
int* H_a0 = (int*)malloc(sizeof(int)); int* H_nV_size = (int*)malloc(sizeof(int)); //required coz if many tries to add same in diff threads high low lower int* H_nVFlag = (int*)malloc(sizeof(int)*N); memset(H_nVFlag,-1,sizeof(int)*N); *H_flagEnd = 0; *H_flagfound = 0; *H_a0 = 0; //insert startNode in PQ[0] H_cx[startNode]=H_hx[startNode]; H_PQ[0]=startNode; H_PQ_size[0]=1; H_openList[startNode]=0; //create events to record runtime float run_time = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //graph struture int* D_offset; int* D_edges ; unsigned int* D_weight; int* D_hx; int* D_parent; //for reading the ancessostor to avoid lock for write after read. int* D_parent_old; //Priority queue size int* D_PQ_size; //flag if in openList(contains which PQ) int* D_openList; //lock for nodes int* D_lock; //Diff structure int* D_diff_edges; int* D_diff_offset; unsigned int* D_diff_weight; //reverse graph int* D_rev_edges; int* D_rev_offset; unsigned int* D_rev_weight; //reverse diff int* D_rev_diff_offset; int* D_rev_diff_edges; unsigned int* D_rev_diff_weight; //next nodes flag int* D_nVFlag; //next nodes array to insert PQ int* D_nV; int* D_nV_size; //nodes to be expanded ( extracted from PQ ) int* D_expandNodes; int* D_expandNodes_size; //flag to end while loop and found the destination int* D_flagEnd; int* D_flagfound; //cost of endNode int* D_dest_cost; //list of nodes v of deleted edges u-> int* D_delEdgesV; gpuErrchk ( hipMalloc(&D_offset,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_edges,sizeof(int)*E) ); gpuErrchk ( hipMalloc(&D_weight,sizeof(unsigned int)*E) ); gpuErrchk ( hipMalloc(&D_hx,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_parent,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_parent_old,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_PQ_size,sizeof(int)*K) ); gpuErrchk ( hipMalloc(&D_openList,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_lock,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_dest_cost,sizeof(int)) ); //rev graph gpuErrchk ( hipMalloc(&D_rev_edges,sizeof(int)*E) ); gpuErrchk ( hipMalloc(&D_rev_offset,sizeof(int)*N ) ); gpuErrchk ( hipMalloc(&D_rev_weight,sizeof(unsigned int)*E) ); //for next set of vertices to add in PQ gpuErrchk ( hipMalloc(&D_nV,sizeof(int)*N) ); gpuErrchk ( hipMalloc(&D_nV_size,sizeof(int)) ); gpuErrchk ( hipMalloc(&D_nVFlag,sizeof(int)*N) ); //next nodes to expand gpuErrchk ( hipMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K gpuErrchk ( hipMalloc(&D_expandNodes_size,sizeof(int)) ); //flag to end search gpuErrchk( hipMalloc(&D_flagEnd,sizeof(int)) ); gpuErrchk( hipMalloc(&D_flagfound,sizeof(int)) ); gpuErrchk ( hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_hx,H_hx,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_parent,H_parent,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_openList,H_openList,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_flagfound,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( 
hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); //reverse graph gpuErrchk ( hipMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemset(D_lock,0,sizeof(int)*N) ); int flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } int numThreads = 512; int numBlocks = (K+numThreads-1)/numThreads; int N_numBlocks = (N+numThreads-1)/numThreads; if(DEBUG) printf("[INFO] A* started\n"); hipEventRecord(start); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, false,D_diff_offset,D_diff_edges,D_diff_offset,0); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); //gen from flag D_nV //for N in parallel hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,K,N,D_openList); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); //cpy flagend and flagEmpty gpuErrchk( hipMemcpy(H_flagfound,D_flagfound, sizeof(int),hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); //reset next insert array gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( hipMemcpy(D_flagEnd,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,endNode,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) ); } } hipLaunchKernelGGL(( getCx), dim3(1),dim3(1), 0, 0, endNode,D_dest_cost); gpuErrchk( hipMemcpy(H_dest_cost,D_dest_cost, sizeof(int),hipMemcpyDeviceToHost) ); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) ); vector<int> Path; printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); Path.push_back(p); p = H_parent[p]; } Path.push_back(p); printf("%d\n",p); } else{ printf("not found\n"); } //reverse the path to get 
from source to end reverse(Path.begin(),Path.end()); // // check_cycle(N,H_parent); /////////////////////////////////////////////// // A star complete // FILE* fdiff = fopen("Updates.txt","r"); int line; int update_count = 0; while(fscanf(fdiff,"%d\n",&line)!=EOF){ //list of nodes v of deleted edges u->v int* H_delEdgesV = (int*)malloc(sizeof(int)*E); gpuErrchk ( hipMalloc(&D_delEdgesV,sizeof(int)*E) ); unordered_map<unsigned int,Node*> Graph; unordered_map<unsigned int,Node*> rev_Graph; bool flag_do_a_star = false; int insertEdge=0, delEdge=0; int delEdgesV_size = 0; //v whose cost can change due to deletion for(int i=0;i<line;i++){ int flag; int u,v; unsigned int w; fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w); if(flag==1){ insertDiff(Graph,u,v,w); insertDiff(rev_Graph,v,u,w); insertEdge++; } else if(flag==0){ //check id del edges in optimal path. check_del_path(u,v,Path,flag_do_a_star); removeDelEdges(u,v,H_offset,H_edges,N,E,H_rev_offset,H_rev_edges); //add to list only if its cost changes due to this deletion if(H_parent[v]==u){ H_delEdgesV[delEdgesV_size]=v; delEdgesV_size++; } delEdge++; } } // inseetEdge is insertion size //for diff int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge); int* H_diff_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge); //diff for revrse graph int* H_rev_diff_edges = (int*)malloc(sizeof(int)*insertEdge); int* H_rev_diff_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_rev_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge); //diff csr gpuErrchk ( hipMalloc(&D_diff_edges,sizeof(int)*insertEdge) ); gpuErrchk ( hipMalloc(&D_diff_offset,sizeof(int)*(N+1) ) ); //coz gpuErrchk ( hipMalloc(&D_diff_weight,sizeof(unsigned int)*insertEdge) ); //rev diff graph gpuErrchk ( hipMalloc(&D_rev_diff_edges,sizeof(int)*insertEdge) ); gpuErrchk ( hipMalloc(&D_rev_diff_offset,sizeof(int)*(N+1) ) ); gpuErrchk ( hipMalloc(&D_rev_diff_weight,sizeof(unsigned int)*insertEdge) ); //reset offset to 0 ..ie no nodes memset(H_diff_offset,0,sizeof(int)*N); memset(H_rev_diff_offset,0,sizeof(int)*N); if(1) printf("[INFO](%d) insertion:%d, deletion:%d, delaff:%d\n",update_count,insertEdge,delEdge,delEdgesV_size); createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight); createDiffGraph(N,rev_Graph,H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight); //TODO free the graphs //deleted edges gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_delEdgesV,H_delEdgesV,sizeof(int)*E,hipMemcpyHostToDevice) ); //diff graph gpuErrchk ( hipMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*insertEdge,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_diff_weight,H_diff_weight,sizeof(unsigned int)*insertEdge,hipMemcpyHostToDevice) ); //rev diff graph gpuErrchk ( hipMemcpy(D_rev_diff_edges,H_rev_diff_edges,sizeof(int)*insertEdge,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_rev_diff_offset,H_rev_diff_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_rev_diff_weight,H_rev_diff_weight,sizeof(unsigned int)*insertEdge,hipMemcpyHostToDevice) ); //reset D_nV flag gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); //add del if(delEdgesV_size>0){ if(DEBUG) printf("[INFO] Starting computing cost for deletions\n"); //old parent to check 
cycle gpuErrchk( hipMemcpy(D_parent_old,D_parent,sizeof(int)*N,hipMemcpyDeviceToDevice) ); int numBlocks_del = ( delEdgesV_size + numThreads -1)/numThreads; hipEventRecord(start); hipLaunchKernelGGL(( propogateDel), dim3(numBlocks_del),dim3(numThreads), 0, 0, D_delEdgesV,delEdgesV_size,D_rev_offset,D_rev_edges,D_rev_weight,N,E, D_hx,D_parent,D_parent_old,D_lock,D_nVFlag); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); } // // gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) ); // check_cycle(N,H_parent); if(DEBUG) printf("[INFO] starting computing cost for inserions\n"); gpuErrchk( hipMemcpy(D_parent_old,D_parent,sizeof(int)*N,hipMemcpyDeviceToDevice) ); hipEventRecord(start); //N parallel hipLaunchKernelGGL(( propogateAdd), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_diff_offset, D_diff_edges,D_diff_weight,D_hx,D_nVFlag, D_lock,D_parent,D_parent_old,N,insertEdge); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); // // gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) ); // check_cycle(N,H_parent); gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); //gen from flag D_nV hipEventRecord(start); hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); //copy back gpuErrchk( hipMemcpy(H_nV_size,D_nV_size, sizeof(int),hipMemcpyDeviceToHost) ); //reset nV flags gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); if(DEBUG) printf("[INFO] starting propogation\n"); while(*H_nV_size > 0){ numBlocks = (*H_nV_size+numThreads-1)/numThreads; //old parent to check cycle and remove locking on parent gpuErrchk( hipMemcpy(D_parent_old,D_parent,sizeof(int)*N,hipMemcpyDeviceToDevice) ); //printf("[INFO] update size:%d\n",*H_nV_size); hipEventRecord(start); hipLaunchKernelGGL(( propogate), dim3(numBlocks),dim3(numThreads), 0, 0, D_nV,D_nV_size,D_offset,D_edges,D_weight,D_hx, N,E,D_lock,D_parent,D_parent_old,D_nVFlag, D_diff_offset,D_diff_edges,D_diff_weight,insertEdge, D_rev_offset,D_rev_edges,D_rev_weight, D_rev_diff_offset,D_rev_diff_edges,D_rev_diff_weight); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); //reset size=0 gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); //gen from flag D_nV hipEventRecord(start); hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); //copy back gpuErrchk( hipMemcpy(H_nV_size,D_nV_size, sizeof(int),hipMemcpyDeviceToHost) ); //reset nV flags gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); } if(DEBUG) printf("[INFO] updating priority queue\n"); //propogate complete do normal A* numBlocks = (K+numThreads-1)/numThreads; //update PQ after propogate hipEventRecord(start); hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); 
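// note: computeTime (defined near the end of this file) adds the elapsed milliseconds between the recorded start/stop events to run_time, so run_time accumulates GPU time across all timed phases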
computeTime(run_time,start,stop); //check if there is node cost in PQ less than dest *H_flagEnd = 1; gpuErrchk( hipMemcpy(D_flagEnd,H_flagEnd,sizeof(int),hipMemcpyHostToDevice) ); hipEventRecord(start); hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,endNode,N,K); gpuErrchk( hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) ); //here flag end represents from above that there is a node with cost lesser if(*H_flagEnd==0 && flag_do_a_star){ printf("[INFO] doing a* after propogation\n"); hipEventRecord(start); hipLaunchKernelGGL(( insertDest), dim3(1),dim3(1), 0, 0, D_PQ_size,endNode,D_openList); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //reset flags *H_flagEnd = 0; *H_flagfound = 0; gpuErrchk ( hipMemcpy(D_flagfound,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min hipEventRecord(start); hipLaunchKernelGGL(( extractMin), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); hipEventRecord(start); hipLaunchKernelGGL(( A_star_expand), dim3(numBlocks),dim3(numThreads), 0, 0, D_offset,D_edges,D_weight,D_hx,D_parent, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, true,D_diff_offset,D_diff_edges,D_diff_offset,insertEdge); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); hipEventRecord(start); hipLaunchKernelGGL(( keepHeapPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,N,K); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); //gen from flag D_nV //for N in parallel hipEventRecord(start); hipLaunchKernelGGL(( setNV), dim3(N_numBlocks),dim3(numThreads), 0, 0, D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); hipEventRecord(start); hipLaunchKernelGGL(( insertPQ), dim3(numBlocks),dim3(numThreads), 0, 0, D_PQ_size,D_nV,D_nV_size,K,N,D_openList); gpuErrchk(hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); //cpy flagend and flagEmpty gpuErrchk( hipMemcpy(H_flagfound,D_flagfound, sizeof(int),hipMemcpyDeviceToHost) ); gpuErrchk( hipMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,hipMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( hipMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,hipMemcpyHostToDevice) ); //reset next insert array gpuErrchk( hipMemcpy(D_nV_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(D_expandNodes_size,H_a0,sizeof(int),hipMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for 
mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( hipMemcpy(D_flagEnd,H_flagfound,sizeof(int),hipMemcpyHostToDevice) ); hipEventRecord(start); hipLaunchKernelGGL(( checkMIN), dim3(numBlocks),dim3(numThreads) , 0, 0, D_PQ_size,D_flagEnd,endNode,N,K); gpuErrchk( hipPeekAtLastError() ); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( hipMemcpy(H_flagEnd,D_flagEnd, sizeof(int),hipMemcpyDeviceToHost) ); // printf("\ninside MIN\n"); } } } hipEventRecord(start); hipLaunchKernelGGL(( getCx), dim3(1),dim3(1), 0, 0, endNode,D_dest_cost); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( hipMemcpy(H_parent,D_parent, sizeof(int)*N,hipMemcpyDeviceToHost) ); // found or not found based on Cx gpuErrchk( hipMemcpy(H_dest_cost,D_dest_cost, sizeof(int),hipMemcpyDeviceToHost) ); //remove old path Path.clear(); printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); Path.push_back(p); p = H_parent[p]; } Path.push_back(p); printf("%d\n",p); } else{ printf("not found\n"); } //reverse the path to get from source to end reverse(Path.begin(),Path.end()); //merge graph int* H_offset_new,*H_edges_new; unsigned int* H_weight_new; int E_new = E + insertEdge - delEdge; H_offset_new = (int*)malloc(sizeof(int)*N); H_edges_new = (int*)malloc(sizeof(int)*E_new); H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new); mergeDiff(H_offset,H_edges,H_weight,N,E, H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge, H_offset_new,H_edges_new,H_weight_new); //free pointer free(H_offset); free(H_edges); free(H_weight); free(H_diff_offset); free(H_diff_edges); free(H_diff_weight); H_offset = H_offset_new; H_edges = H_edges_new; H_weight = H_weight_new; //hipFree and cpy hipFree(D_edges); hipFree(D_weight); hipFree(D_diff_edges); hipFree(D_diff_offset); hipFree(D_diff_weight); gpuErrchk ( hipMalloc(&D_edges,sizeof(int)*E_new) ); gpuErrchk ( hipMalloc(&D_weight,sizeof(unsigned int)*E_new) ); gpuErrchk ( hipMemcpy(D_offset,H_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_edges,H_edges,sizeof(int)*E_new,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,hipMemcpyHostToDevice) ); //merge rev graph int* H_rev_offset_new,*H_rev_edges_new; unsigned int* H_rev_weight_new; H_rev_offset_new = (int*)malloc(sizeof(int)*N); H_rev_edges_new = (int*)malloc(sizeof(int)*E_new); H_rev_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new); mergeDiff(H_rev_offset,H_rev_edges,H_rev_weight,N,E, H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight,insertEdge,delEdge, H_rev_offset_new,H_rev_edges_new,H_rev_weight_new); free(H_rev_offset); free(H_rev_edges); free(H_rev_weight); free(H_rev_diff_offset); free(H_rev_diff_edges); free(H_rev_diff_weight); H_rev_offset = H_rev_offset_new; H_rev_edges = H_rev_edges_new; H_rev_weight = H_rev_weight_new; //cuda free and cpy hipFree(D_rev_edges); hipFree(D_rev_weight); hipFree(D_rev_diff_edges); hipFree(D_rev_diff_offset); hipFree(D_rev_diff_weight); gpuErrchk ( hipMalloc(&D_rev_edges,sizeof(int)*E_new) ); gpuErrchk ( hipMalloc(&D_rev_weight,sizeof(unsigned int)*E_new) ); gpuErrchk ( hipMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,hipMemcpyHostToDevice) ); gpuErrchk ( hipMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E_new,hipMemcpyHostToDevice) ); gpuErrchk ( 
hipMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E_new,hipMemcpyHostToDevice) ); //change E E = E_new; hipFree(D_delEdgesV); free(H_delEdgesV); //inc update_count++; } printf("[INFO] update count: %d\n",update_count); printf("[INFO] RUNTIME: %f\n",run_time); //cuda free // free everything } void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(a); if(itr!=Graph.end()){ Node* n = itr->second; unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } else{ Node* n =new Node(a); Graph.insert(pair<unsigned int,Node*>(a,n)); unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } } void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ){ int offindex = 0; diffOff[offindex] = 0; offindex++; int k =0; int weightCount = 0; for(int i=0;i<N;i++){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(i); if(itr!=Graph.end()){ Node* n = itr->second; for(int j=0;j<n->Edges.size();j++){ diffEdges[k] = n->Edges[j]->val; k++; } for(int j=0;j<n->weights.size();j++){ diffWeight[weightCount] = n->weights[j]; weightCount++; } if(offindex < N ){ diffOff[offindex] = k; offindex++; } } else{ if(offindex < N ){ diffOff[offindex] = k; offindex++; } } } } void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges){ int start = offset[u]; int end = E; if(u!=N-1) end = offset[u+1]; while(start<end){ if( v == edges[start]){ edges[start]=-1; break; } start++; } start = rev_offset[v]; end = E; if(v!=N-1) end = rev_offset[v+1]; while(start < end){ if(u == rev_edges[start]){ rev_edges[start] = -1; break; } start++; } } void check_del_path(int u, int v,vector<int> Path, bool& flag){ vector<int> :: iterator itr; itr = find(Path.begin(),Path.end(),u); if(itr!=Path.end()){ itr+=1; if(*itr == v) flag = true; } } void check_cycle(int N,int* parent){ int flag = 0; for(int i=0;i<N;i++){ vector<int> visited(N,0); int ancestor = parent[i]; while(ancestor > 0){ if(visited[ancestor]==1){ printf("cycle at: %d, %d\n",i,ancestor); flag =1; break; } visited[ancestor]=1; ancestor = parent[ancestor]; } } if(flag==0) printf("no cycle\n"); } void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight){ mOffset[0] = 0; int edegOffset= 0; for(int i=0;i<N;i++){ int start = offset[i]; int end = E; if(i!=N-1) end = offset[i+1]; int count = 0; while(start<end){ int child = edges[start]; if(child!=-1){ mEdges[edegOffset+count] = child; mWeight[edegOffset+count] = weight[start]; count++; } start++; } start = diff_offset[i]; end = insert_size; if(i!=N-1) end = diff_offset[i+1]; while(start<end){ int child = diff_edges[start]; if(child!=-1){ mEdges[edegOffset+count] = child; mWeight[edegOffset+count]= diff_weight[start]; count++; } start++; } edegOffset+=count; if(i!=N-1) mOffset[i+1]=edegOffset; } } void computeTime(float& time,hipEvent_t start, hipEvent_t stop){ float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); time+= 
milliseconds; //printf("[INFO] run time: %f, %f\n",time,milliseconds); }
8c86d0969a59113e2edeae8a4708b5c73fcb5473.cu
/* For DIRECTED GRAPH */ #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <iostream> #include <vector> #include <unordered_map> #include <string> #include <algorithm> /***all macros**/ #define MAX_NODE 100000000 #define DEBUG 1 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /**all type declaration***/ using namespace std; class Node{ public: unsigned int val; vector<unsigned int> weights; vector<Node*> Edges; Node(int val){ this->val = val; } void addEdge(Node* v,unsigned int w){ this->Edges.push_back(v); this->weights.push_back(w); } }; /***function declarations***/ void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c); void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ); void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges); void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight); void check_del_path(int u, int v,vector<int> Path, bool& flag); void check_cycle(int N,int* parent); void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop); /**** device Code *******/ __device__ volatile int Cx[MAX_NODE]; __device__ volatile int PQ[MAX_NODE]; //K in parallel __global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<K && PQ_size[id]>0){ //extract min from PQ int front = id* ( (N+K-1)/K ); int node = PQ[front]; // restructure the heap PQ[front]=PQ[front+PQ_size[id]-1]; PQ_size[id]-=1; int pqIndex = 0; while(2*pqIndex+1 < PQ_size[id]){ if(2*pqIndex+2 >= PQ_size[id]){ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else break; } else{ if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ int swap = PQ[front + 2*pqIndex+1]; PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+1; } else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ int swap = PQ[front + 2*pqIndex+2]; PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; PQ[front + pqIndex] = swap; pqIndex = 2*pqIndex+2; } else{ break; } } } //removed from openList openList[node] = -1; //added to expand next int len = atomicAdd(expandNodes_size,1); expandNodes[len]=node; } } //for K in parallel __global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent, int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList, int N,int E, int K,int dest,int* nVFlag,int* PQ_size, int flagDiff,int* diff_off,int* diff_edge,int* diff_weight,int dE ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id< *expandNodes_size ){ int node = expandNodes[id]; //reach dest if(node == dest){ atomicOr(flagfound,1); } // expand int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start < end){ int child = edge[start]; 
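// note: a negative entry in edge[] marks an edge deleted by removeDelEdges and is skipped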
//deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(leaveLoop==false){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } //diff expand if(flagDiff){ start = diff_off[node]; end = dE; if(node!=N-1) end = diff_off[node+1]; while(start<end){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; if(openList[child]==-1){ nVFlag[child]=1; //add only once } } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } } //end diff }//end } //K in parallel -- O(N) __global__ void keepHeapPQ(int* PQ_size,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0){ int front = id*( (N+K-1)/K ); int size = PQ_size[id]; for(int i=front;i<front+size;i++){ if(2*i+2 < front+size){ int cost = Cx[PQ[i]]; int costLeft = Cx[PQ[2*i+1]]; int costRight = Cx[PQ[2*i+2]]; if( cost > costLeft || cost > costRight ){ int index ; if(costLeft <= costRight) index = 2*i+1; else index = 2*i+2; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } else if(2*i+1 < front+size){ if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){ int index = 2*i+1; while(index > front){ if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ int swap = PQ[index]; PQ[index] = PQ[(index-1)/2]; PQ[(index-1)/2] = swap; index = (index-1)/2; } else break; } } } } } } //N threads __global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ if(nextFlag[id]==1){ int index = atomicAdd(nvSize,1); nextV[index]=id; } } } //for K in parallel __global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K){ int front = id*( (N+K-1)/K ); int i = id; while(i<*nVsize){ //if not already present if(openList[nextV[i]]!=-1){ i+=K; continue; } PQ[front+PQS[id]]= nextV[i]; PQS[id]+=1; //add in openList openList[nextV[i]] = id; if(PQS[id]>1){ int index = PQS[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } i += K; } } } //for K in parallel __global__ void checkMIN(int* PQ_size,int* flagEnd,int dest,int N,int K){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < K && PQ_size[id] > 0 ){ int front = id* ( (N+K-1)/K ); int node = PQ[front]; //check if atleast one min, dont end the a* if( Cx[node] < Cx[dest] ){ atomicAnd(flagEnd,0); } } } __global__ void propogateDel(int* delEdgesV,int delEdge,int* rev_offset,int* rev_edges,unsigned int* rev_weight,int N,int E, int* Hx,int* parent,int* parent_old,int* lock,int* addFlag){ int 
id = blockIdx.x*blockDim.x+threadIdx.x; if(id<delEdge){ int node = delEdgesV[id]; //check for the parent and add to nextflag and update the cost int start = rev_offset[node]; int end = E; if(node!=N-1) end = rev_offset[node+1]; //no parent // write in parent read always from old_parent parent[node] = -1; Cx[node]=INT_MAX; addFlag[node]=1; //if any parent can change the cost while(start< end){ int p = rev_edges[start]; //del edges if(p<0){ start++; continue; } int weight = rev_weight[start]; int flag_cycle = false; //check parent doesn't contain node int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==node){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } //no need to lock only single parent so only one node in array so one node per thread if(!flag_cycle && Cx[p]!=INT_MAX && Cx[node] > (Cx[p]-Hx[p])+weight+Hx[node] ){ Cx[node] = (Cx[p]-Hx[p] )+weight+Hx[node]; parent[node] = p; } start++; } } } //add inserted edges to propogate __global__ void propogateAdd(int* diff_off, int* diff_edges,unsigned int* diff_W,int* Hx,int* addFlag, int* lock, int* parent, int* parent_old, int N, int dE){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < N){ int node = id; int start = diff_off[node]; int end = dE; if(node!=N-1) end = diff_off[node+1]; while(start < end ){ int child = diff_edges[start]; //deleted edges if(child<0){ start++; continue; } //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section bool flag_cycle = false; int ancestor = node; while(ancestor > 0){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } /* if(flag_cycle){ printf("Add %d->%d,%d:%d::%d\n",node,child,Cx[node],Cx[child],(Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]); if(Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]){ int ancestor = node; while(ancestor > 0){ if(ancestor==child){ printf("%d:%d\n",ancestor,Cx[ancestor]); break; } printf("%d:%d::%d ",ancestor,Cx[ancestor],parent[ancestor]); ancestor = parent_old[ancestor]; } } }*/ if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]; parent[child] = node; __threadfence(); addFlag[child]=1; } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } start++; } } } //propogate the change __global__ void propogate(int* nodes, int* size, int* off, int* edge,unsigned int* W,int* Hx, int N,int E, int* lock, int* parent,int* parent_old,int* addFlag, int* diff_off,int* diff_edge,unsigned int* diff_W,int dE, int* rev_offset,int* rev_edges,unsigned int* rev_weight, int* rev_diff_offset,int* rev_diff_edges,unsigned int* rev_diff_weight){ int id = blockIdx.x*blockDim.x+threadIdx.x; // printf("Entering %d\n",id); if(id < *size){ int node = nodes[id]; int start = off[node]; int end = E; if(node!=N-1) end = off[node+1]; while(start < end ){ int child = edge[start]; //deleted edges if(child<0){ start++; continue; } bool flag_cycle_insert = false; int optimal_parent = node; while(optimal_parent > 0){ if(optimal_parent == child){ flag_cycle_insert = true; break; } optimal_parent = parent_old[optimal_parent]; } if(!flag_cycle_insert){ //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(!leaveLoop){ if(atomicExch(&lock[child],1)==0){ //critical section if(Cx[node]!=INT_MAX 
&& Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; __threadfence(); parent[child] = node; addFlag[child]=1; } else if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){ //use back edges int rstart = rev_offset[child]; int rend = E; if(child!=N-1) rend = rev_offset[child+1]; //there is always one parent that is node. Cx[child] = INT_MAX; parent[child]=-1; while(rstart < rend){ int p = rev_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_weight[rstart]; bool flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor > 0){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } //newly added backedges rstart = rev_diff_offset[child]; rend = dE; if(child!=N-1) rend = rev_diff_offset[child+1]; while(rstart < rend){ int p = rev_diff_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_diff_weight[rstart]; int flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } addFlag[child]=1; } //end critical section leaveLoop = true; atomicExch(&lock[child],0); } __syncthreads(); } } start++; } start = diff_off[node]; end = dE; if(node!=N-1) end = diff_off[node+1]; while(start < end ){ int child = diff_edge[start]; //deleted edges if(child<0){ start++; continue; } bool flag_cycle_insert = false; int optimal_parent = node; while(optimal_parent > 0){ if(optimal_parent == child){ flag_cycle_insert = true; break; } optimal_parent = parent_old[optimal_parent]; } if(!flag_cycle_insert){ //array L initilaized with 0 //get the lock for child to update C(x) //loop till acquire the lock bool leaveLoop = false; while(!leaveLoop){ if(atomicCAS(&lock[child],0,1)==0){ //critical section if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){ Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]; __threadfence(); parent[child] = node; addFlag[child]=1; }else if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){ //use back edges int rstart = rev_offset[child]; int rend = E; if(child!=N-1) rend = rev_offset[child+1]; //there is always one parent that is node. 
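// note: the child's recorded parent is no longer valid or optimal, so reset its cost and rebuild it from the remaining in-edges (reverse CSR below, then the reverse diff CSR), skipping candidates that would introduce a parent cycle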
Cx[child] = INT_MAX; parent[child]=-1; while(rstart < rend){ int p = rev_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_weight[rstart]; int flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==child) flag_cycle = true; ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } rstart = rev_diff_offset[child]; rend = dE; if(child!=N-1) rend = rev_diff_offset[child+1]; while(rstart < rend){ int p = rev_diff_edges[rstart]; if(p<0){ rstart++; continue; } int weight = rev_diff_weight[rstart]; int flag_cycle = false; //check parent doesn't contain child int ancestor = parent_old[p]; while(ancestor!=-1){ if(ancestor==child){ flag_cycle = true; break; } ancestor = parent_old[ancestor]; } if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; parent[child] = p; } rstart++; } addFlag[child]=1; } //end critical section leaveLoop = true; atomicCAS(&lock[child],1,0); } __syncthreads(); } } start++; } } } //do in 1 thread __global__ void insertDest(int* PQ_size, int dest,int* openList){ int id = 0; int front = 0; if(openList[dest]==-1){ PQ[front+PQ_size[id]]= dest; PQ_size[id]+=1; //add in openList openList[dest] = id; if(PQ_size[id]>1){ int index = PQ_size[id]-1; while(index>0){ if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ int swap = PQ[front+index]; PQ[front+index]=PQ[front+ (index-1)/2]; PQ[front+ (index-1)/2] = swap; index = (index-1)/2; } else break; } } } } __global__ void getCx(int dest,int* val){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id==0){ *val = Cx[dest]; } } /**** main function ****/ int main(){ //the K PQ int K ; scanf("%d\n",&K); int startNode,endNode; scanf("%d %d",&startNode,&endNode); FILE* fgraph = fopen("graph.txt","r"); FILE* fgraph_rev = fopen("graph_op.txt","r"); int N,E; fscanf(fgraph_rev,"%d %d\n",&N,&E); fscanf(fgraph,"%d %d\n",&N,&E); int* H_offset = (int*)malloc(sizeof(int)*N); int* H_edges = (int*)malloc(sizeof(int)*E); unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); int* H_hx = (int*)malloc(sizeof(int)*N); int* H_cx = (int*)malloc(sizeof(int)*N); int* H_parent = (int*)malloc(sizeof(int)*N); int* H_PQ = (int*)malloc(sizeof(int)*N); int* H_openList = (int*)malloc(sizeof(int)*N); int* H_PQ_size = (int*)malloc(sizeof(int)*K); //for reverse graph int* H_rev_edges = (int*)malloc(sizeof(int)*E); int* H_rev_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_rev_weight = (unsigned int*)malloc(sizeof(unsigned int)*E); //for cost of endNode int* H_dest_cost = (int*)malloc(sizeof(int)); memset(H_PQ_size,0,sizeof(int)*K); memset(H_openList,-1,sizeof(int)*N); //init cx for(int i=0;i<N;i++){ H_cx[i]=INT_MAX; H_parent[i]=-1; } for(int i=0;i<E;i++){ fscanf(fgraph,"%d",&H_edges[i]); fscanf(fgraph_rev,"%d",&H_rev_edges[i]); } for(int i=0;i<N;i++){ fscanf(fgraph,"%d",&H_offset[i]); fscanf(fgraph_rev,"%d",&H_rev_offset[i]); } for(int i=0;i<E;i++){ fscanf(fgraph,"%u",&H_weight[i]); fscanf(fgraph_rev,"%u",&H_rev_weight[i]); } FILE* fhx = fopen("Hx.txt","r"); for(int i=0;i<N;i++){ int temp; fscanf(fhx,"%d",&temp); if(temp!=-1) H_hx[i]= temp; else H_hx[i] = 0; //to change } fclose(fgraph); fclose(fhx); fclose(fgraph_rev); printf("[INFO] completed taking input\n"); //init Host var int* H_flagEnd = (int*)malloc(sizeof(int)); int* H_flagfound = (int*)malloc(sizeof(int)); 
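// note: H_a0 stays 0 on the host; it is copied (cudaMemcpy) over D_nV_size and D_expandNodes_size to reset those device counters between iterations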
int* H_a0 = (int*)malloc(sizeof(int)); int* H_nV_size = (int*)malloc(sizeof(int)); //required coz if many tries to add same in diff threads high low lower int* H_nVFlag = (int*)malloc(sizeof(int)*N); memset(H_nVFlag,-1,sizeof(int)*N); *H_flagEnd = 0; *H_flagfound = 0; *H_a0 = 0; //insert startNode in PQ[0] H_cx[startNode]=H_hx[startNode]; H_PQ[0]=startNode; H_PQ_size[0]=1; H_openList[startNode]=0; //create events to record runtime float run_time = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //graph struture int* D_offset; int* D_edges ; unsigned int* D_weight; int* D_hx; int* D_parent; //for reading the ancessostor to avoid lock for write after read. int* D_parent_old; //Priority queue size int* D_PQ_size; //flag if in openList(contains which PQ) int* D_openList; //lock for nodes int* D_lock; //Diff structure int* D_diff_edges; int* D_diff_offset; unsigned int* D_diff_weight; //reverse graph int* D_rev_edges; int* D_rev_offset; unsigned int* D_rev_weight; //reverse diff int* D_rev_diff_offset; int* D_rev_diff_edges; unsigned int* D_rev_diff_weight; //next nodes flag int* D_nVFlag; //next nodes array to insert PQ int* D_nV; int* D_nV_size; //nodes to be expanded ( extracted from PQ ) int* D_expandNodes; int* D_expandNodes_size; //flag to end while loop and found the destination int* D_flagEnd; int* D_flagfound; //cost of endNode int* D_dest_cost; //list of nodes v of deleted edges u-> int* D_delEdgesV; gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) ); gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E) ); gpuErrchk ( cudaMalloc(&D_hx,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_parent_old,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) ); gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_dest_cost,sizeof(int)) ); //rev graph gpuErrchk ( cudaMalloc(&D_rev_edges,sizeof(int)*E) ); gpuErrchk ( cudaMalloc(&D_rev_offset,sizeof(int)*N ) ); gpuErrchk ( cudaMalloc(&D_rev_weight,sizeof(unsigned int)*E) ); //for next set of vertices to add in PQ gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) ); gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) ); gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) ); //next nodes to expand gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) ); //flag to end search gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) ); gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) ); gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( 
cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); //reverse graph gpuErrchk ( cudaMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) ); int flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } int numThreads = 512; int numBlocks = (K+numThreads-1)/numThreads; int N_numBlocks = (N+numThreads-1)/numThreads; if(DEBUG) printf("[INFO] A* started\n"); cudaEventRecord(start); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, false,D_diff_offset,D_diff_edges,D_diff_offset,0); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); //gen from flag D_nV //for N in parallel setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,K,N,D_openList); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); //cpy flagend and flagEmpty gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); //reset next insert array gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) ); } } getCx<<<1,1>>>(endNode,D_dest_cost); gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) ); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) ); vector<int> Path; printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); Path.push_back(p); p = H_parent[p]; } Path.push_back(p); printf("%d\n",p); } else{ printf("not found\n"); } //reverse the path to get from source to end reverse(Path.begin(),Path.end()); // // check_cycle(N,H_parent); /////////////////////////////////////////////// // A star complete // FILE* fdiff = fopen("Updates.txt","r"); int line; int 
update_count = 0; while(fscanf(fdiff,"%d\n",&line)!=EOF){ //list of nodes v of deleted edges u->v int* H_delEdgesV = (int*)malloc(sizeof(int)*E); gpuErrchk ( cudaMalloc(&D_delEdgesV,sizeof(int)*E) ); unordered_map<unsigned int,Node*> Graph; unordered_map<unsigned int,Node*> rev_Graph; bool flag_do_a_star = false; int insertEdge=0, delEdge=0; int delEdgesV_size = 0; //v whose cost can change due to deletion for(int i=0;i<line;i++){ int flag; int u,v; unsigned int w; fscanf(fdiff,"%d %d %d %u\n",&flag,&u,&v,&w); if(flag==1){ insertDiff(Graph,u,v,w); insertDiff(rev_Graph,v,u,w); insertEdge++; } else if(flag==0){ //check id del edges in optimal path. check_del_path(u,v,Path,flag_do_a_star); removeDelEdges(u,v,H_offset,H_edges,N,E,H_rev_offset,H_rev_edges); //add to list only if its cost changes due to this deletion if(H_parent[v]==u){ H_delEdgesV[delEdgesV_size]=v; delEdgesV_size++; } delEdge++; } } // inseetEdge is insertion size //for diff int* H_diff_edges = (int*)malloc(sizeof(int)*insertEdge); int* H_diff_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge); //diff for revrse graph int* H_rev_diff_edges = (int*)malloc(sizeof(int)*insertEdge); int* H_rev_diff_offset = (int*)malloc(sizeof(int)*N); unsigned int* H_rev_diff_weight = (unsigned int*)malloc(sizeof(unsigned int)*insertEdge); //diff csr gpuErrchk ( cudaMalloc(&D_diff_edges,sizeof(int)*insertEdge) ); gpuErrchk ( cudaMalloc(&D_diff_offset,sizeof(int)*(N+1) ) ); //coz gpuErrchk ( cudaMalloc(&D_diff_weight,sizeof(unsigned int)*insertEdge) ); //rev diff graph gpuErrchk ( cudaMalloc(&D_rev_diff_edges,sizeof(int)*insertEdge) ); gpuErrchk ( cudaMalloc(&D_rev_diff_offset,sizeof(int)*(N+1) ) ); gpuErrchk ( cudaMalloc(&D_rev_diff_weight,sizeof(unsigned int)*insertEdge) ); //reset offset to 0 ..ie no nodes memset(H_diff_offset,0,sizeof(int)*N); memset(H_rev_diff_offset,0,sizeof(int)*N); if(1) printf("[INFO](%d) insertion:%d, deletion:%d, delaff:%d\n",update_count,insertEdge,delEdge,delEdgesV_size); createDiffGraph(N,Graph,H_diff_offset,H_diff_edges,H_diff_weight); createDiffGraph(N,rev_Graph,H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight); //TODO free the graphs //deleted edges gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_delEdgesV,H_delEdgesV,sizeof(int)*E,cudaMemcpyHostToDevice) ); //diff graph gpuErrchk ( cudaMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*insertEdge,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_diff_weight,H_diff_weight,sizeof(unsigned int)*insertEdge,cudaMemcpyHostToDevice) ); //rev diff graph gpuErrchk ( cudaMemcpy(D_rev_diff_edges,H_rev_diff_edges,sizeof(int)*insertEdge,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_diff_offset,H_rev_diff_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_diff_weight,H_rev_diff_weight,sizeof(unsigned int)*insertEdge,cudaMemcpyHostToDevice) ); //reset D_nV flag gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); //add del if(delEdgesV_size>0){ if(DEBUG) printf("[INFO] Starting computing cost for deletions\n"); //old parent to check cycle gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) ); int numBlocks_del = ( delEdgesV_size + numThreads -1)/numThreads; cudaEventRecord(start); 
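// NOTE (descriptive comment, inferred from the surrounding code): for each vertex whose parent edge
// was deleted, the kernel below recomputes its cost/parent from the reverse graph and flags affected
// nodes in D_nVFlag, which are later gathered by setNV and pushed through the propagation loop.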
propogateDel<<<numBlocks_del,numThreads>>>(D_delEdgesV,delEdgesV_size,D_rev_offset,D_rev_edges,D_rev_weight,N,E, D_hx,D_parent,D_parent_old,D_lock,D_nVFlag); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); } // // gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) ); // check_cycle(N,H_parent); if(DEBUG) printf("[INFO] starting computing cost for inserions\n"); gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) ); cudaEventRecord(start); //N parallel propogateAdd<<<N_numBlocks,numThreads>>>(D_diff_offset, D_diff_edges,D_diff_weight,D_hx,D_nVFlag, D_lock,D_parent,D_parent_old,N,insertEdge); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); // // gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) ); // check_cycle(N,H_parent); gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); //gen from flag D_nV cudaEventRecord(start); setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); //copy back gpuErrchk( cudaMemcpy(H_nV_size,D_nV_size, sizeof(int),cudaMemcpyDeviceToHost) ); //reset nV flags gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); if(DEBUG) printf("[INFO] starting propogation\n"); while(*H_nV_size > 0){ numBlocks = (*H_nV_size+numThreads-1)/numThreads; //old parent to check cycle and remove locking on parent gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) ); //printf("[INFO] update size:%d\n",*H_nV_size); cudaEventRecord(start); propogate<<<numBlocks,numThreads>>>(D_nV,D_nV_size,D_offset,D_edges,D_weight,D_hx, N,E,D_lock,D_parent,D_parent_old,D_nVFlag, D_diff_offset,D_diff_edges,D_diff_weight,insertEdge, D_rev_offset,D_rev_edges,D_rev_weight, D_rev_diff_offset,D_rev_diff_edges,D_rev_diff_weight); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); //reset size=0 gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); //gen from flag D_nV cudaEventRecord(start); setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); //copy back gpuErrchk( cudaMemcpy(H_nV_size,D_nV_size, sizeof(int),cudaMemcpyDeviceToHost) ); //reset nV flags gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); } if(DEBUG) printf("[INFO] updating priority queue\n"); //propogate complete do normal A* numBlocks = (K+numThreads-1)/numThreads; //update PQ after propogate cudaEventRecord(start); keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); //check if there is node cost in PQ less than dest *H_flagEnd = 1; gpuErrchk( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) ); cudaEventRecord(start); checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K); gpuErrchk( cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); 
cudaEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) ); //here flag end represents from above that there is a node with cost lesser if(*H_flagEnd==0 && flag_do_a_star){ printf("[INFO] doing a* after propogation\n"); cudaEventRecord(start); insertDest<<<1,1>>>(D_PQ_size,endNode,D_openList); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //reset flags *H_flagEnd = 0; *H_flagfound = 0; gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); //DO A* initailly on whole graph while(*H_flagEnd==0 && flag_PQ_not_empty==1){ //extract min cudaEventRecord(start); extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); cudaEventRecord(start); A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent, D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList, N,E,K,endNode,D_nVFlag,D_PQ_size, true,D_diff_offset,D_diff_edges,D_diff_offset,insertEdge); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); cudaEventRecord(start); keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,N,K); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); //gen from flag D_nV //for N in parallel cudaEventRecord(start); setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); cudaEventRecord(start); insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,K,N,D_openList); gpuErrchk(cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); //cpy flagend and flagEmpty gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) ); gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) ); //reset nVFlag gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) ); //reset next insert array gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) ); flag_PQ_not_empty = 0; for(int i=0;i<K;i++){ if(H_PQ_size[i]>0) flag_PQ_not_empty=1; } //check for mins if( *H_flagfound==1 && flag_PQ_not_empty==1){ //end gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) ); cudaEventRecord(start); checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K); gpuErrchk( cudaPeekAtLastError() ); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) ); // printf("\ninside MIN\n"); } } } cudaEventRecord(start); getCx<<<1,1>>>(endNode,D_dest_cost); 
cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); computeTime(run_time,start,stop); gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) ); // found or not found based on Cx gpuErrchk( cudaMemcpy(H_dest_cost,D_dest_cost, sizeof(int),cudaMemcpyDeviceToHost) ); //remove old path Path.clear(); printf("[OUT] Cost: %d\n",*H_dest_cost); printf("[OUT] Path(in reverse): "); if(*H_dest_cost!=INT_MAX){ int p = endNode; while(H_parent[p]!=-1){ printf("%d ",p); Path.push_back(p); p = H_parent[p]; } Path.push_back(p); printf("%d\n",p); } else{ printf("not found\n"); } //reverse the path to get from source to end reverse(Path.begin(),Path.end()); //merge graph int* H_offset_new,*H_edges_new; unsigned int* H_weight_new; int E_new = E + insertEdge - delEdge; H_offset_new = (int*)malloc(sizeof(int)*N); H_edges_new = (int*)malloc(sizeof(int)*E_new); H_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new); mergeDiff(H_offset,H_edges,H_weight,N,E, H_diff_offset,H_diff_edges,H_diff_weight,insertEdge,delEdge, H_offset_new,H_edges_new,H_weight_new); //free pointer free(H_offset); free(H_edges); free(H_weight); free(H_diff_offset); free(H_diff_edges); free(H_diff_weight); H_offset = H_offset_new; H_edges = H_edges_new; H_weight = H_weight_new; //cudaFree and cpy cudaFree(D_edges); cudaFree(D_weight); cudaFree(D_diff_edges); cudaFree(D_diff_offset); cudaFree(D_diff_weight); gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E_new) ); gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E_new) ); gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E_new,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E_new,cudaMemcpyHostToDevice) ); //merge rev graph int* H_rev_offset_new,*H_rev_edges_new; unsigned int* H_rev_weight_new; H_rev_offset_new = (int*)malloc(sizeof(int)*N); H_rev_edges_new = (int*)malloc(sizeof(int)*E_new); H_rev_weight_new = (unsigned int*)malloc(sizeof(unsigned int)*E_new); mergeDiff(H_rev_offset,H_rev_edges,H_rev_weight,N,E, H_rev_diff_offset,H_rev_diff_edges,H_rev_diff_weight,insertEdge,delEdge, H_rev_offset_new,H_rev_edges_new,H_rev_weight_new); free(H_rev_offset); free(H_rev_edges); free(H_rev_weight); free(H_rev_diff_offset); free(H_rev_diff_edges); free(H_rev_diff_weight); H_rev_offset = H_rev_offset_new; H_rev_edges = H_rev_edges_new; H_rev_weight = H_rev_weight_new; //cuda free and cpy cudaFree(D_rev_edges); cudaFree(D_rev_weight); cudaFree(D_rev_diff_edges); cudaFree(D_rev_diff_offset); cudaFree(D_rev_diff_weight); gpuErrchk ( cudaMalloc(&D_rev_edges,sizeof(int)*E_new) ); gpuErrchk ( cudaMalloc(&D_rev_weight,sizeof(unsigned int)*E_new) ); gpuErrchk ( cudaMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E_new,cudaMemcpyHostToDevice) ); gpuErrchk ( cudaMemcpy(D_rev_weight,H_rev_weight,sizeof(unsigned int)*E_new,cudaMemcpyHostToDevice) ); //change E E = E_new; cudaFree(D_delEdgesV); free(H_delEdgesV); //inc update_count++; } printf("[INFO] update count: %d\n",update_count); printf("[INFO] RUNTIME: %f\n",run_time); //cuda free // free everything } void insertDiff(unordered_map< unsigned int, Node*>& Graph,int a,int b,unsigned int c){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(a); if(itr!=Graph.end()){ Node* n = itr->second; unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* 
v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } else{ Node* n =new Node(a); Graph.insert(pair<unsigned int,Node*>(a,n)); unordered_map<unsigned int,Node*>:: iterator it; it = Graph.find(b); if(it!=Graph.end()){ Node* v = it->second; n->addEdge(v,c); } else{ Node* v = new Node(b); n->addEdge(v,c); Graph.insert(pair<unsigned int,Node*>(b,v)); } } } void createDiffGraph(int N,unordered_map<unsigned int,Node*>& Graph, int* diffOff,int* diffEdges,unsigned int* diffWeight ){ int offindex = 0; diffOff[offindex] = 0; offindex++; int k =0; int weightCount = 0; for(int i=0;i<N;i++){ unordered_map<unsigned int,Node*>:: iterator itr; itr = Graph.find(i); if(itr!=Graph.end()){ Node* n = itr->second; for(int j=0;j<n->Edges.size();j++){ diffEdges[k] = n->Edges[j]->val; k++; } for(int j=0;j<n->weights.size();j++){ diffWeight[weightCount] = n->weights[j]; weightCount++; } if(offindex < N ){ diffOff[offindex] = k; offindex++; } } else{ if(offindex < N ){ diffOff[offindex] = k; offindex++; } } } } void removeDelEdges(int u,int v,int* offset,int* edges,int N,int E,int* rev_offset,int* rev_edges){ int start = offset[u]; int end = E; if(u!=N-1) end = offset[u+1]; while(start<end){ if( v == edges[start]){ edges[start]=-1; break; } start++; } start = rev_offset[v]; end = E; if(v!=N-1) end = rev_offset[v+1]; while(start < end){ if(u == rev_edges[start]){ rev_edges[start] = -1; break; } start++; } } void check_del_path(int u, int v,vector<int> Path, bool& flag){ vector<int> :: iterator itr; itr = find(Path.begin(),Path.end(),u); if(itr!=Path.end()){ itr+=1; if(*itr == v) flag = true; } } void check_cycle(int N,int* parent){ int flag = 0; for(int i=0;i<N;i++){ vector<int> visited(N,0); int ancestor = parent[i]; while(ancestor > 0){ if(visited[ancestor]==1){ printf("cycle at: %d, %d\n",i,ancestor); flag =1; break; } visited[ancestor]=1; ancestor = parent[ancestor]; } } if(flag==0) printf("no cycle\n"); } void mergeDiff(int* offset,int* edges,unsigned int* weight,int N,int& E, int* diff_offset, int* diff_edges,unsigned int* diff_weight,int insert_size,int del_size, int* mOffset,int* mEdges,unsigned int* mWeight){ mOffset[0] = 0; int edegOffset= 0; for(int i=0;i<N;i++){ int start = offset[i]; int end = E; if(i!=N-1) end = offset[i+1]; int count = 0; while(start<end){ int child = edges[start]; if(child!=-1){ mEdges[edegOffset+count] = child; mWeight[edegOffset+count] = weight[start]; count++; } start++; } start = diff_offset[i]; end = insert_size; if(i!=N-1) end = diff_offset[i+1]; while(start<end){ int child = diff_edges[start]; if(child!=-1){ mEdges[edegOffset+count] = child; mWeight[edegOffset+count]= diff_weight[start]; count++; } start++; } edegOffset+=count; if(i!=N-1) mOffset[i+1]=edegOffset; } } void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop){ float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); time+= milliseconds; //printf("[INFO] run time: %f, %f\n",time,milliseconds); }
150ac03c9bc15b2505da782d973c30c2c1532ab4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by albert on 27.04.19.
//

#include "removeUselessStatesKernel.h"

__global__ void removeUselessStates(HashMap *h, State *t, int *sSize, int slidesCount) {
    int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK_COUNT;
    for (int i = id * MAX_S_SIZE; i < id * MAX_S_SIZE + sSize[id]; i++) {
        if (t[i].f == -1)
            continue;
        State *tmp = h->find(t[i].node, slidesCount);
        if (tmp->f != -1 && tmp->g < t[i].g)
            t[i].f = -1;
    }
}
150ac03c9bc15b2505da782d973c30c2c1532ab4.cu
//
// Created by albert on 27.04.19.
//

#include "removeUselessStatesKernel.h"

__global__ void removeUselessStates(HashMap *h, State *t, int *sSize, int slidesCount) {
    int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK_COUNT;
    for (int i = id * MAX_S_SIZE; i < id * MAX_S_SIZE + sSize[id]; i++) {
        if (t[i].f == -1)
            continue;
        State *tmp = h->find(t[i].node, slidesCount);
        if (tmp->f != -1 && tmp->g < t[i].g)
            t[i].f = -1;
    }
}
7443c47d08b589cd543fd487a7e7a8dfe4450855.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/geometry/voxelgrid.h" #include "cupoch/integration/marching_cubes_const.h" #include "cupoch/integration/uniform_tsdfvolume.h" #include "cupoch/utility/helper.h" #include <thrust/iterator/discard_iterator.h> using namespace cupoch; using namespace cupoch::integration; namespace { __device__ float GetTSDFAt(const Eigen::Vector3f &p, const geometry::TSDFVoxel *voxels, float voxel_length, int resolution) { Eigen::Vector3i idx; Eigen::Vector3f p_grid = p / voxel_length - Eigen::Vector3f(0.5, 0.5, 0.5); for (int i = 0; i < 3; i++) { idx(i) = (int)::floor(p_grid(i)); } Eigen::Vector3f r = p_grid - idx.cast<float>(); float tsdf = 0; tsdf += (1 - r(0)) * (1 - r(1)) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 0), resolution)].tsdf_; tsdf += (1 - r(0)) * (1 - r(1)) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 1), resolution)].tsdf_; tsdf += (1 - r(0)) * r(1) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 0), resolution)].tsdf_; tsdf += (1 - r(0)) * r(1) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 1), resolution)].tsdf_; tsdf += r(0) * (1 - r(1)) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 0), resolution)].tsdf_; tsdf += r(0) * (1 - r(1)) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 1), resolution)].tsdf_; tsdf += r(0) * r(1) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 0), resolution)].tsdf_; tsdf += r(0) * r(1) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 1), resolution)].tsdf_; return tsdf; } __device__ Eigen::Vector3f GetNormalAt(const Eigen::Vector3f &p, const geometry::TSDFVoxel *voxels, float voxel_length, int resolution) { Eigen::Vector3f n; const double half_gap = 0.99 * voxel_length; #pragma unroll for (int i = 0; i < 3; i++) { Eigen::Vector3f p0 = p; p0(i) -= half_gap; Eigen::Vector3f p1 = p; p1(i) += half_gap; n(i) = GetTSDFAt(p1, voxels, voxel_length, resolution) - GetTSDFAt(p0, voxels, voxel_length, resolution); } return n.normalized(); } struct extract_pointcloud_functor { extract_pointcloud_functor(const geometry::TSDFVoxel* voxels, int resolution, float voxel_length, const Eigen::Vector3f &origin, TSDFVolumeColorType color_type) : voxels_(voxels), resolution_(resolution), voxel_length_(voxel_length), origin_(origin), half_voxel_length_(0.5 * voxel_length_), color_type_(color_type){}; const geometry::TSDFVoxel* voxels_; const int resolution_; const float voxel_length_; const Eigen::Vector3f origin_; const float half_voxel_length_; const TSDFVolumeColorType color_type_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> operator()(const size_t idx) { int res2 = (resolution_ - 2) * (resolution_ - 2); int x = idx / (3 * res2) + 1; int yzi = idx % (3 * res2); int y = yzi / (3 * (resolution_ - 2)) + 1; int zi = yzi % (3 * (resolution_ - 2)); int z = zi / 3 + 1; int i = zi % 3; Eigen::Vector3f point(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()); Eigen::Vector3f normal(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()); Eigen::Vector3f color(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()); Eigen::Vector3i idx0(x, y, z); float w0 = voxels_[IndexOf(idx0, resolution_)].weight_; float f0 = voxels_[IndexOf(idx0, resolution_)].tsdf_; const Eigen::Vector3f &c0 = voxels_[IndexOf(idx0, 
resolution_)].color_; if (!(w0 != 0.0f && f0 < 0.98f && f0 >= -0.98f)) { return thrust::make_tuple(point, normal, color); } Eigen::Vector3f p0(half_voxel_length_ + voxel_length_ * x, half_voxel_length_ + voxel_length_ * y, half_voxel_length_ + voxel_length_ * z); Eigen::Vector3f p1 = p0; p1(i) += voxel_length_; Eigen::Vector3i idx1 = idx0; idx1(i) += 1; if (idx1(i) < resolution_ - 1) { float w1 = voxels_[IndexOf(idx1, resolution_)].weight_; float f1 = voxels_[IndexOf(idx1, resolution_)].tsdf_; const Eigen::Vector3f &c1 = voxels_[IndexOf(idx1, resolution_)].color_; if (w1 != 0.0f && f1 < 0.98f && f1 >= -0.98f && f0 * f1 < 0) { float r0 = ::fabs(f0); float r1 = ::fabs(f1); Eigen::Vector3f p = p0; p(i) = (p0(i) * r1 + p1(i) * r0) / (r0 + r1); point = p + origin_; if (color_type_ == TSDFVolumeColorType::RGB8) { color = (c0 * r1 + c1 * r0) / (r0 + r1) / 255.0f; } else if (color_type_ == TSDFVolumeColorType::Gray32) { color = (c0 * r1 + c1 * r0) / (r0 + r1); } // has_normal normal = GetNormalAt(p, voxels_, voxel_length_, resolution_); } } return thrust::make_tuple(point, normal, color); } }; struct count_valid_voxels_functor { count_valid_voxels_functor(const geometry::TSDFVoxel* voxels, int resolution) : voxels_(voxels), resolution_(resolution) {}; const geometry::TSDFVoxel* voxels_; const int resolution_; __device__ bool operator() (const geometry::TSDFVoxel& v) const { if (v.grid_index_[0] == resolution_ - 1 || v.grid_index_[1] == resolution_ - 1 || v.grid_index_[2] == resolution_ - 1) return false; #pragma unroll for (int i = 0; i < 8; ++i) { Eigen::Vector3i idx = v.grid_index_ + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); if (voxels_[IndexOf(idx, resolution_)].weight_ == 0.0f) return false; } return true; } }; struct extract_mesh_phase0_functor { extract_mesh_phase0_functor(const geometry::TSDFVoxel *voxels, int resolution) : voxels_(voxels), resolution_(resolution) {}; const geometry::TSDFVoxel *voxels_; const int resolution_; __device__ thrust::tuple<Eigen::Vector3i, int> operator()(size_t idx) { int res2 = (resolution_ - 1) * (resolution_ - 1); int x = idx / res2; int yz = idx % res2; int y = yz / (resolution_ - 1); int z = yz % (resolution_ - 1); int cube_index = 0; Eigen::Vector3i key = Eigen::Vector3i(x, y, z); for (int i = 0; i < 8; ++i) { Eigen::Vector3i idxs = key + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) { return thrust::make_tuple(key, -1); } else { float f = voxels_[IndexOf(idxs, resolution_)].tsdf_; if (f < 0.0f) { cube_index |= (1 << i); } } } return thrust::make_tuple(key, cube_index); } }; struct extract_mesh_phase1_functor { extract_mesh_phase1_functor(const geometry::TSDFVoxel *voxels, const Eigen::Vector3i *keys, int resolution, TSDFVolumeColorType color_type) : voxels_(voxels), keys_(keys), resolution_(resolution), color_type_(color_type) {}; const geometry::TSDFVoxel *voxels_; const Eigen::Vector3i* keys_; const int resolution_; TSDFVolumeColorType color_type_; __device__ thrust::tuple<float, Eigen::Vector3f> operator()(size_t idx) { int j = idx / 8; int i = idx % 8; const Eigen::Vector3i& key = keys_[j]; Eigen::Vector3i idxs = key + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); Eigen::Vector3f c = Eigen::Vector3f::Zero(); if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) { return thrust::make_tuple(0.0f, c); } else { float f = voxels_[IndexOf(idxs, resolution_)].tsdf_; if (color_type_ == TSDFVolumeColorType::RGB8) { c = voxels_[IndexOf(idxs, resolution_)].color_ / 
255.0; } else if (color_type_ == TSDFVolumeColorType::Gray32) { c = voxels_[IndexOf(idxs, resolution_)].color_; } return thrust::make_tuple(f, c); } } }; struct extract_mesh_phase2_functor { extract_mesh_phase2_functor(const Eigen::Vector3i* keys, const int* cube_indices, const Eigen::Vector3f &origin, int resolution, float voxel_length, const float *fs, const Eigen::Vector3f *cs, TSDFVolumeColorType color_type) : keys_(keys), cube_indices_(cube_indices), origin_(origin), resolution_(resolution), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length_), fs_(fs), cs_(cs), color_type_(color_type){}; const Eigen::Vector3i* keys_; const int* cube_indices_; const Eigen::Vector3f origin_; const int resolution_; const float voxel_length_; const float half_voxel_length_; const float *fs_; const Eigen::Vector3f *cs_; const TSDFVolumeColorType color_type_; __device__ thrust::tuple<Eigen::Vector3i, int, int, Eigen::Vector3f, Eigen::Vector3f> operator() (size_t idx) const { int j = idx / 12; const Eigen::Vector3i& xyz = keys_[j]; int cube_index = cube_indices_[j]; int offset = j * 8; int x = xyz[0]; int y = xyz[1]; int z = xyz[2]; int i = idx % 12; if (edge_table[cube_index] & (1 << i)) { Eigen::Vector4i edge_index = Eigen::Vector4i(x, y, z, 0) + Eigen::Vector4i(edge_shift[i][0], edge_shift[i][1], edge_shift[i][2], edge_shift[i][3]); Eigen::Vector3f pt( half_voxel_length_ + voxel_length_ * edge_index(0), half_voxel_length_ + voxel_length_ * edge_index(1), half_voxel_length_ + voxel_length_ * edge_index(2)); float f0 = abs(fs_[offset + edge_to_vert[i][0]]); float f1 = abs(fs_[offset + edge_to_vert[i][1]]); pt(edge_index(3)) += f0 * voxel_length_ / (f0 + f1); Eigen::Vector3f vertex = pt + origin_; Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero(); if (color_type_ != TSDFVolumeColorType::NoColor) { const auto &c0 = cs_[offset + edge_to_vert[i][0]]; const auto &c1 = cs_[offset + edge_to_vert[i][1]]; vertex_color = (f1 * c0 + f0 * c1) / (f0 + f1); } return thrust::make_tuple(xyz, cube_index, i, vertex, vertex_color); } else { Eigen::Vector3i index = -Eigen::Vector3i::Ones(); Eigen::Vector3f vertex = Eigen::Vector3f::Zero(); Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero(); return thrust::make_tuple(index, cube_index, i, vertex, vertex_color); } } }; __constant__ int vert_table[3] = {0, 2, 1}; struct extract_mesh_phase3_functor { extract_mesh_phase3_functor(const int *cube_index, const int *vert_no, const int *key_index, Eigen::Vector3i *triangles) : cube_index_(cube_index), vert_no_(vert_no), key_index_(key_index), triangles_(triangles) {}; const int *cube_index_; const int *vert_no_; const int *key_index_; Eigen::Vector3i *triangles_; __device__ void operator()(size_t idx) { const int kindx0 = key_index_[idx]; const int kindx1 = key_index_[idx + 1]; for (int j = kindx0; j < kindx1; ++j) { const int cindx = cube_index_[j]; for (int i = 0; tri_table[cindx][i] != -1; ++i) { const int tri_idx = tri_table[cindx][i]; for (int l = kindx0; l < kindx1; ++l) { if (vert_no_[l] == tri_idx) { triangles_[idx * 4 + i / 3][vert_table[i % 3]] = l; } } } } } }; struct extract_voxel_pointcloud_functor { extract_voxel_pointcloud_functor(const Eigen::Vector3f &origin, int resolution, float voxel_length) : origin_(origin), resolution_(resolution), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length){}; const Eigen::Vector3f origin_; const int resolution_; const float voxel_length_; const float half_voxel_length_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( const 
geometry::TSDFVoxel& v) { int x = v.grid_index_[0]; int y = v.grid_index_[1]; int z = v.grid_index_[2]; Eigen::Vector3f pt(half_voxel_length_ + voxel_length_ * x, half_voxel_length_ + voxel_length_ * y, half_voxel_length_ + voxel_length_ * z); if (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f) { float c = (v.tsdf_ + 1.0) * 0.5; return thrust::make_tuple(pt + origin_, Eigen::Vector3f(c, c, c)); } return thrust::make_tuple( Eigen::Vector3f(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()), Eigen::Vector3f(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN())); } }; struct extract_voxel_grid_functor { extract_voxel_grid_functor(int resolution) : resolution_(resolution){}; const int resolution_; __device__ thrust::tuple<Eigen::Vector3i, geometry::Voxel> operator()( const geometry::TSDFVoxel& v) { const float w = v.weight_; const float f = v.tsdf_; if (w != 0.0f && f < 0.98f && f >= -0.98f) { float c = (f + 1.0) * 0.5; return thrust::make_tuple(v.grid_index_, geometry::Voxel(v.grid_index_, Eigen::Vector3f(c, c, c))); } return thrust::make_tuple(Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX), geometry::Voxel()); } }; struct integrate_functor { integrate_functor(const Eigen::Vector3f &origin, float fx, float fy, float cx, float cy, const Eigen::Matrix4f &extrinsic, float voxel_length, float sdf_trunc, float safe_width, float safe_height, int resolution, const uint8_t *color, const uint8_t *depth, const uint8_t *depth_to_camera_distance_multiplier, int width, int num_of_channels, TSDFVolumeColorType color_type, geometry::TSDFVoxel *voxels) : origin_(origin), fx_(fx), fy_(fy), cx_(cx), cy_(cy), extrinsic_(extrinsic), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length), sdf_trunc_(sdf_trunc), sdf_trunc_inv_(1.0 / sdf_trunc), extrinsic_scaled_(voxel_length * extrinsic), safe_width_(safe_width), safe_height_(safe_height), resolution_(resolution), color_(color), depth_(depth), depth_to_camera_distance_multiplier_( depth_to_camera_distance_multiplier), width_(width), num_of_channels_(num_of_channels), color_type_(color_type), voxels_(voxels){}; const Eigen::Vector3f origin_; const float fx_; const float fy_; const float cx_; const float cy_; const Eigen::Matrix4f extrinsic_; const float voxel_length_; const float half_voxel_length_; const float sdf_trunc_; const float sdf_trunc_inv_; const Eigen::Matrix4f extrinsic_scaled_; const float safe_width_; const float safe_height_; const int resolution_; const uint8_t *color_; const uint8_t *depth_; const uint8_t *depth_to_camera_distance_multiplier_; const int width_; const int num_of_channels_; const TSDFVolumeColorType color_type_; geometry::TSDFVoxel *voxels_; __device__ void operator()(size_t idx) { int res2 = resolution_ * resolution_; int x = idx / res2; int yz = idx % res2; int y = yz / resolution_; int z = yz % resolution_; voxels_[idx].grid_index_ = Eigen::Vector3i(x, y, z); Eigen::Vector4f pt_3d_homo( float(half_voxel_length_ + voxel_length_ * x + origin_(0)), float(half_voxel_length_ + voxel_length_ * y + origin_(1)), float(half_voxel_length_ + origin_(2)), 1.f); Eigen::Vector4f pt_camera = extrinsic_ * pt_3d_homo; pt_camera(0) += z * extrinsic_scaled_(0, 2); pt_camera(1) += z * extrinsic_scaled_(1, 2); pt_camera(2) += z * extrinsic_scaled_(2, 2); // Skip if negative depth after projection if (pt_camera(2) <= 0) { return; 
} // Skip if x-y coordinate not in range float u_f = pt_camera(0) * fx_ / pt_camera(2) + cx_ + 0.5f; float v_f = pt_camera(1) * fy_ / pt_camera(2) + cy_ + 0.5f; if (!(u_f >= 0.0001f && u_f < safe_width_ && v_f >= 0.0001f && v_f < safe_height_)) { return; } // Skip if negative depth in depth image int u = (int)u_f; int v = (int)v_f; float d = *geometry::PointerAt<float>(depth_, width_, u, v); if (d <= 0.0f) { return; } float sdf = (d - pt_camera(2)) * (*geometry::PointerAt<float>( depth_to_camera_distance_multiplier_, width_, u, v)); if (sdf > -sdf_trunc_) { // integrate float tsdf = min(1.0f, sdf * sdf_trunc_inv_); const geometry::TSDFVoxel voxel = voxels_[idx]; voxels_[idx].tsdf_ = (voxel.tsdf_ * voxel.weight_ + tsdf) / (voxel.weight_ + 1.0f); if (color_type_ == TSDFVolumeColorType::RGB8) { const uint8_t *rgb = geometry::PointerAt<uint8_t>( color_, width_, num_of_channels_, u, v, 0); Eigen::Vector3f rgb_f(rgb[0], rgb[1], rgb[2]); voxels_[idx].color_ = (voxel.color_ * voxel.weight_ + rgb_f) / (voxel.weight_ + 1.0f); } else if (color_type_ == TSDFVolumeColorType::Gray32) { const float intensity = *geometry::PointerAt<float>( color_, width_, num_of_channels_, u, v, 0); voxels_[idx].color_ = (voxel.color_.array() * voxel.weight_ + intensity) / (voxel.weight_ + 1.0f); } voxels_[idx].weight_ += 1.0f; } } }; } // namespace UniformTSDFVolume::UniformTSDFVolume( float length, int resolution, float sdf_trunc, TSDFVolumeColorType color_type, const Eigen::Vector3f &origin /* = Eigen::Vector3f::Zero()*/) : TSDFVolume(length / (float)resolution, sdf_trunc, color_type), origin_(origin), length_(length), resolution_(resolution), voxel_num_(resolution * resolution * resolution) { voxels_.resize(voxel_num_); } UniformTSDFVolume::~UniformTSDFVolume() {} UniformTSDFVolume::UniformTSDFVolume(const UniformTSDFVolume &other) : TSDFVolume(other), voxels_(other.voxels_), origin_(other.origin_), length_(other.length_), resolution_(other.resolution_), voxel_num_(other.voxel_num_) {} void UniformTSDFVolume::Reset() { voxels_.clear(); } void UniformTSDFVolume::Integrate( const geometry::RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic) { // This function goes through the voxels, and scan convert the relative // depth/color value into the voxel. // The following implementation is a highly optimized version. 
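// Descriptive comment (added): reject inputs whose depth/color layout or dimensions do not match
// the camera intrinsics or the configured TSDFVolumeColorType before integrating.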
if ((image.depth_.num_of_channels_ != 1) || (image.depth_.bytes_per_channel_ != 4) || (image.depth_.width_ != intrinsic.width_) || (image.depth_.height_ != intrinsic.height_) || (color_type_ == TSDFVolumeColorType::RGB8 && image.color_.num_of_channels_ != 3) || (color_type_ == TSDFVolumeColorType::RGB8 && image.color_.bytes_per_channel_ != 1) || (color_type_ == TSDFVolumeColorType::Gray32 && image.color_.num_of_channels_ != 1) || (color_type_ == TSDFVolumeColorType::Gray32 && image.color_.bytes_per_channel_ != 4) || (color_type_ != TSDFVolumeColorType::NoColor && image.color_.width_ != intrinsic.width_) || (color_type_ != TSDFVolumeColorType::NoColor && image.color_.height_ != intrinsic.height_)) { utility::LogError( "[UniformTSDFVolume::Integrate] Unsupported image format."); } auto depth2cameradistance = geometry::Image::CreateDepthToCameraDistanceMultiplierFloatImage( intrinsic); IntegrateWithDepthToCameraDistanceMultiplier(image, intrinsic, extrinsic, *depth2cameradistance); } std::shared_ptr<geometry::PointCloud> UniformTSDFVolume::ExtractPointCloud() { auto pointcloud = std::make_shared<geometry::PointCloud>(); size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), [] __device__ (const geometry::TSDFVoxel& v) { return (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f); }); extract_pointcloud_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, voxel_length_, origin_, color_type_); pointcloud->points_.resize(n_valid_voxels); pointcloud->normals_.resize(n_valid_voxels); pointcloud->colors_.resize(n_valid_voxels); size_t n_total = (resolution_ - 2) * (resolution_ - 2) * (resolution_ - 2) * 3; auto begin = make_tuple_begin(pointcloud->points_, pointcloud->normals_, pointcloud->colors_); auto end_p = thrust::copy_if(thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func), thrust::make_transform_iterator(thrust::make_counting_iterator(n_total), func), begin, [] __device__ (const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f>& x) { const Eigen::Vector3f& pt = thrust::get<0>(x); return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2))); }); resize_all(thrust::distance(begin, end_p), pointcloud->points_, pointcloud->normals_, pointcloud->colors_); if (color_type_ == TSDFVolumeColorType::NoColor) pointcloud->colors_.clear(); return pointcloud; } std::shared_ptr<geometry::TriangleMesh> UniformTSDFVolume::ExtractTriangleMesh() { // implementation of marching cubes, based on // http://paulbourke.net/geometry/polygonise/ auto mesh = std::make_shared<geometry::TriangleMesh>(); size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), count_valid_voxels_functor(thrust::raw_pointer_cast(voxels_.data()), resolution_)); size_t res3 = (resolution_ - 1) * (resolution_ - 1) * (resolution_ - 1); // compute cube indices for each voxels utility::device_vector<Eigen::Vector3i> keys(n_valid_voxels); utility::device_vector<int> cube_indices(n_valid_voxels); extract_mesh_phase0_functor func0(thrust::raw_pointer_cast(voxels_.data()), resolution_); thrust::copy_if( thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func0), thrust::make_transform_iterator(thrust::make_counting_iterator(res3), func0), make_tuple_begin(keys, cube_indices), [] __device__ (const thrust::tuple<Eigen::Vector3i, int>& x) { return thrust::get<1>(x) >= 0; }); auto check_fn = [] __device__(const thrust::tuple<Eigen::Vector3i, int> &x) -> bool { int cidx = thrust::get<1>(x); return (cidx <= 0 || cidx >= 255); }; size_t 
n_result1 = remove_if_vectors(check_fn, keys, cube_indices); utility::device_vector<float> fs(n_result1 * 8); utility::device_vector<Eigen::Vector3f> cs(n_result1 * 8); extract_mesh_phase1_functor func1(thrust::raw_pointer_cast(voxels_.data()), thrust::raw_pointer_cast(keys.data()), resolution_, color_type_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_result1 * 8), make_tuple_begin(fs, cs), func1); // compute vertices and vertex_colors int* ci_p = thrust::raw_pointer_cast(cube_indices.data()); size_t n_valid_cubes = thrust::count_if(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_result1 * 12), [ci_p] __device__ (size_t idx) { int i = idx / 12; int j = idx % 12; return (edge_table[ci_p[i]] & (1 << j)) > 0; }); resize_all(n_valid_cubes, mesh->vertices_, mesh->vertex_colors_); utility::device_vector<Eigen::Vector3i> repeat_keys(n_valid_cubes); utility::device_vector<int> repeat_cube_indices(n_valid_cubes); utility::device_vector<int> vert_no(n_valid_cubes); extract_mesh_phase2_functor func2(thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(cube_indices.data()), origin_, voxel_length_, resolution_, thrust::raw_pointer_cast(fs.data()), thrust::raw_pointer_cast(cs.data()), color_type_); thrust::copy_if( thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func2), thrust::make_transform_iterator(thrust::make_counting_iterator(n_result1 * 12), func2), make_tuple_begin(repeat_keys, repeat_cube_indices, vert_no, mesh->vertices_, mesh->vertex_colors_), [] __device__ (const thrust::tuple<Eigen::Vector3i, int, int, Eigen::Vector3f, Eigen::Vector3f>& x) { return thrust::get<0>(x)[0] >= 0; }); // compute triangles utility::device_vector<int> vt_offsets(n_valid_cubes + 1, 0); auto end2 = thrust::reduce_by_key(repeat_keys.begin(), repeat_keys.end(), thrust::make_constant_iterator<int>(1), thrust::make_discard_iterator(), vt_offsets.begin()); size_t n_result2 = thrust::distance(vt_offsets.begin(), end2.second); vt_offsets.resize(n_result2 + 1); thrust::exclusive_scan(vt_offsets.begin(), vt_offsets.end(), vt_offsets.begin()); mesh->triangles_.resize(n_result2 * 4, Eigen::Vector3i(-1, -1, -1)); extract_mesh_phase3_functor func3( thrust::raw_pointer_cast(repeat_cube_indices.data()), thrust::raw_pointer_cast(vert_no.data()), thrust::raw_pointer_cast(vt_offsets.data()), thrust::raw_pointer_cast(mesh->triangles_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(n_result2), func3); auto end3 = thrust::remove_if( mesh->triangles_.begin(), mesh->triangles_.end(), [] __device__(const Eigen::Vector3i &idxs) { return idxs[0] < 0; }); mesh->triangles_.resize(thrust::distance(mesh->triangles_.begin(), end3)); return mesh; } std::shared_ptr<geometry::PointCloud> UniformTSDFVolume::ExtractVoxelPointCloud() const { auto voxel = std::make_shared<geometry::PointCloud>(); // const float *p_tsdf = (const float *)tsdf_.data(); // const float *p_weight = (const float *)weight_.data(); // const float *p_color = (const float *)color_.data(); size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), [] __device__ (const geometry::TSDFVoxel& v) { return (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f); }); extract_voxel_pointcloud_functor func(origin_, resolution_, voxel_length_); resize_all(n_valid_voxels, voxel->points_, voxel->colors_); thrust::copy_if( thrust::make_transform_iterator(voxels_.begin(), func), 
thrust::make_transform_iterator(voxels_.end(), func), make_tuple_begin(voxel->points_, voxel->colors_), [] __device__ (const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f>& x) { const Eigen::Vector3f& pt = thrust::get<0>(x); return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2))); }); voxel->RemoveNoneFinitePoints(true, false); return voxel; } std::shared_ptr<geometry::VoxelGrid> UniformTSDFVolume::ExtractVoxelGrid() const { auto voxel_grid = std::make_shared<geometry::VoxelGrid>(); voxel_grid->voxel_size_ = voxel_length_; voxel_grid->origin_ = origin_; size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), [] __device__ (const geometry::TSDFVoxel& v) { return (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f); }); resize_all(n_valid_voxels, voxel_grid->voxels_keys_, voxel_grid->voxels_values_); extract_voxel_grid_functor func(resolution_); thrust::copy_if(thrust::make_transform_iterator(voxels_.begin(), func), thrust::make_transform_iterator(voxels_.end(), func), make_tuple_begin(voxel_grid->voxels_keys_, voxel_grid->voxels_values_), [] __device__ (const thrust::tuple<Eigen::Vector3i, geometry::Voxel>& x) { return thrust::get<0>(x) != Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX); }); return voxel_grid; } void UniformTSDFVolume::IntegrateWithDepthToCameraDistanceMultiplier( const geometry::RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic, const geometry::Image &depth_to_camera_distance_multiplier) { const float fx = intrinsic.GetFocalLength().first; const float fy = intrinsic.GetFocalLength().second; const float cx = intrinsic.GetPrincipalPoint().first; const float cy = intrinsic.GetPrincipalPoint().second; const float safe_width = intrinsic.width_ - 0.0001f; const float safe_height = intrinsic.height_ - 0.0001f; voxels_.resize(voxel_num_); integrate_functor func( origin_, fx, fy, cx, cy, extrinsic, voxel_length_, sdf_trunc_, safe_width, safe_height, resolution_, thrust::raw_pointer_cast(image.color_.data_.data()), thrust::raw_pointer_cast(image.depth_.data_.data()), thrust::raw_pointer_cast( depth_to_camera_distance_multiplier.data_.data()), image.depth_.width_, image.color_.num_of_channels_, color_type_, thrust::raw_pointer_cast(voxels_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>( resolution_ * resolution_ * resolution_), func); }
7443c47d08b589cd543fd487a7e7a8dfe4450855.cu
#include "cupoch/geometry/voxelgrid.h" #include "cupoch/integration/marching_cubes_const.h" #include "cupoch/integration/uniform_tsdfvolume.h" #include "cupoch/utility/helper.h" #include <thrust/iterator/discard_iterator.h> using namespace cupoch; using namespace cupoch::integration; namespace { __device__ float GetTSDFAt(const Eigen::Vector3f &p, const geometry::TSDFVoxel *voxels, float voxel_length, int resolution) { Eigen::Vector3i idx; Eigen::Vector3f p_grid = p / voxel_length - Eigen::Vector3f(0.5, 0.5, 0.5); for (int i = 0; i < 3; i++) { idx(i) = (int)std::floor(p_grid(i)); } Eigen::Vector3f r = p_grid - idx.cast<float>(); float tsdf = 0; tsdf += (1 - r(0)) * (1 - r(1)) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 0), resolution)].tsdf_; tsdf += (1 - r(0)) * (1 - r(1)) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(0, 0, 1), resolution)].tsdf_; tsdf += (1 - r(0)) * r(1) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 0), resolution)].tsdf_; tsdf += (1 - r(0)) * r(1) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(0, 1, 1), resolution)].tsdf_; tsdf += r(0) * (1 - r(1)) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 0), resolution)].tsdf_; tsdf += r(0) * (1 - r(1)) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(1, 0, 1), resolution)].tsdf_; tsdf += r(0) * r(1) * (1 - r(2)) * voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 0), resolution)].tsdf_; tsdf += r(0) * r(1) * r(2) * voxels[IndexOf(idx + Eigen::Vector3i(1, 1, 1), resolution)].tsdf_; return tsdf; } __device__ Eigen::Vector3f GetNormalAt(const Eigen::Vector3f &p, const geometry::TSDFVoxel *voxels, float voxel_length, int resolution) { Eigen::Vector3f n; const double half_gap = 0.99 * voxel_length; #pragma unroll for (int i = 0; i < 3; i++) { Eigen::Vector3f p0 = p; p0(i) -= half_gap; Eigen::Vector3f p1 = p; p1(i) += half_gap; n(i) = GetTSDFAt(p1, voxels, voxel_length, resolution) - GetTSDFAt(p0, voxels, voxel_length, resolution); } return n.normalized(); } struct extract_pointcloud_functor { extract_pointcloud_functor(const geometry::TSDFVoxel* voxels, int resolution, float voxel_length, const Eigen::Vector3f &origin, TSDFVolumeColorType color_type) : voxels_(voxels), resolution_(resolution), voxel_length_(voxel_length), origin_(origin), half_voxel_length_(0.5 * voxel_length_), color_type_(color_type){}; const geometry::TSDFVoxel* voxels_; const int resolution_; const float voxel_length_; const Eigen::Vector3f origin_; const float half_voxel_length_; const TSDFVolumeColorType color_type_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f> operator()(const size_t idx) { int res2 = (resolution_ - 2) * (resolution_ - 2); int x = idx / (3 * res2) + 1; int yzi = idx % (3 * res2); int y = yzi / (3 * (resolution_ - 2)) + 1; int zi = yzi % (3 * (resolution_ - 2)); int z = zi / 3 + 1; int i = zi % 3; Eigen::Vector3f point(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()); Eigen::Vector3f normal(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()); Eigen::Vector3f color(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()); Eigen::Vector3i idx0(x, y, z); float w0 = voxels_[IndexOf(idx0, resolution_)].weight_; float f0 = voxels_[IndexOf(idx0, resolution_)].tsdf_; const Eigen::Vector3f &c0 = voxels_[IndexOf(idx0, resolution_)].color_; if (!(w0 != 0.0f && f0 < 0.98f && f0 >= -0.98f)) { 
return thrust::make_tuple(point, normal, color); } Eigen::Vector3f p0(half_voxel_length_ + voxel_length_ * x, half_voxel_length_ + voxel_length_ * y, half_voxel_length_ + voxel_length_ * z); Eigen::Vector3f p1 = p0; p1(i) += voxel_length_; Eigen::Vector3i idx1 = idx0; idx1(i) += 1; if (idx1(i) < resolution_ - 1) { float w1 = voxels_[IndexOf(idx1, resolution_)].weight_; float f1 = voxels_[IndexOf(idx1, resolution_)].tsdf_; const Eigen::Vector3f &c1 = voxels_[IndexOf(idx1, resolution_)].color_; if (w1 != 0.0f && f1 < 0.98f && f1 >= -0.98f && f0 * f1 < 0) { float r0 = std::fabs(f0); float r1 = std::fabs(f1); Eigen::Vector3f p = p0; p(i) = (p0(i) * r1 + p1(i) * r0) / (r0 + r1); point = p + origin_; if (color_type_ == TSDFVolumeColorType::RGB8) { color = (c0 * r1 + c1 * r0) / (r0 + r1) / 255.0f; } else if (color_type_ == TSDFVolumeColorType::Gray32) { color = (c0 * r1 + c1 * r0) / (r0 + r1); } // has_normal normal = GetNormalAt(p, voxels_, voxel_length_, resolution_); } } return thrust::make_tuple(point, normal, color); } }; struct count_valid_voxels_functor { count_valid_voxels_functor(const geometry::TSDFVoxel* voxels, int resolution) : voxels_(voxels), resolution_(resolution) {}; const geometry::TSDFVoxel* voxels_; const int resolution_; __device__ bool operator() (const geometry::TSDFVoxel& v) const { if (v.grid_index_[0] == resolution_ - 1 || v.grid_index_[1] == resolution_ - 1 || v.grid_index_[2] == resolution_ - 1) return false; #pragma unroll for (int i = 0; i < 8; ++i) { Eigen::Vector3i idx = v.grid_index_ + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); if (voxels_[IndexOf(idx, resolution_)].weight_ == 0.0f) return false; } return true; } }; struct extract_mesh_phase0_functor { extract_mesh_phase0_functor(const geometry::TSDFVoxel *voxels, int resolution) : voxels_(voxels), resolution_(resolution) {}; const geometry::TSDFVoxel *voxels_; const int resolution_; __device__ thrust::tuple<Eigen::Vector3i, int> operator()(size_t idx) { int res2 = (resolution_ - 1) * (resolution_ - 1); int x = idx / res2; int yz = idx % res2; int y = yz / (resolution_ - 1); int z = yz % (resolution_ - 1); int cube_index = 0; Eigen::Vector3i key = Eigen::Vector3i(x, y, z); for (int i = 0; i < 8; ++i) { Eigen::Vector3i idxs = key + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) { return thrust::make_tuple(key, -1); } else { float f = voxels_[IndexOf(idxs, resolution_)].tsdf_; if (f < 0.0f) { cube_index |= (1 << i); } } } return thrust::make_tuple(key, cube_index); } }; struct extract_mesh_phase1_functor { extract_mesh_phase1_functor(const geometry::TSDFVoxel *voxels, const Eigen::Vector3i *keys, int resolution, TSDFVolumeColorType color_type) : voxels_(voxels), keys_(keys), resolution_(resolution), color_type_(color_type) {}; const geometry::TSDFVoxel *voxels_; const Eigen::Vector3i* keys_; const int resolution_; TSDFVolumeColorType color_type_; __device__ thrust::tuple<float, Eigen::Vector3f> operator()(size_t idx) { int j = idx / 8; int i = idx % 8; const Eigen::Vector3i& key = keys_[j]; Eigen::Vector3i idxs = key + Eigen::Vector3i(shift[i][0], shift[i][1], shift[i][2]); Eigen::Vector3f c = Eigen::Vector3f::Zero(); if (voxels_[IndexOf(idxs, resolution_)].weight_ == 0.0f) { return thrust::make_tuple(0.0f, c); } else { float f = voxels_[IndexOf(idxs, resolution_)].tsdf_; if (color_type_ == TSDFVolumeColorType::RGB8) { c = voxels_[IndexOf(idxs, resolution_)].color_ / 255.0; } else if (color_type_ == TSDFVolumeColorType::Gray32) { c = 
voxels_[IndexOf(idxs, resolution_)].color_; } return thrust::make_tuple(f, c); } } }; struct extract_mesh_phase2_functor { extract_mesh_phase2_functor(const Eigen::Vector3i* keys, const int* cube_indices, const Eigen::Vector3f &origin, int resolution, float voxel_length, const float *fs, const Eigen::Vector3f *cs, TSDFVolumeColorType color_type) : keys_(keys), cube_indices_(cube_indices), origin_(origin), resolution_(resolution), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length_), fs_(fs), cs_(cs), color_type_(color_type){}; const Eigen::Vector3i* keys_; const int* cube_indices_; const Eigen::Vector3f origin_; const int resolution_; const float voxel_length_; const float half_voxel_length_; const float *fs_; const Eigen::Vector3f *cs_; const TSDFVolumeColorType color_type_; __device__ thrust::tuple<Eigen::Vector3i, int, int, Eigen::Vector3f, Eigen::Vector3f> operator() (size_t idx) const { int j = idx / 12; const Eigen::Vector3i& xyz = keys_[j]; int cube_index = cube_indices_[j]; int offset = j * 8; int x = xyz[0]; int y = xyz[1]; int z = xyz[2]; int i = idx % 12; if (edge_table[cube_index] & (1 << i)) { Eigen::Vector4i edge_index = Eigen::Vector4i(x, y, z, 0) + Eigen::Vector4i(edge_shift[i][0], edge_shift[i][1], edge_shift[i][2], edge_shift[i][3]); Eigen::Vector3f pt( half_voxel_length_ + voxel_length_ * edge_index(0), half_voxel_length_ + voxel_length_ * edge_index(1), half_voxel_length_ + voxel_length_ * edge_index(2)); float f0 = abs(fs_[offset + edge_to_vert[i][0]]); float f1 = abs(fs_[offset + edge_to_vert[i][1]]); pt(edge_index(3)) += f0 * voxel_length_ / (f0 + f1); Eigen::Vector3f vertex = pt + origin_; Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero(); if (color_type_ != TSDFVolumeColorType::NoColor) { const auto &c0 = cs_[offset + edge_to_vert[i][0]]; const auto &c1 = cs_[offset + edge_to_vert[i][1]]; vertex_color = (f1 * c0 + f0 * c1) / (f0 + f1); } return thrust::make_tuple(xyz, cube_index, i, vertex, vertex_color); } else { Eigen::Vector3i index = -Eigen::Vector3i::Ones(); Eigen::Vector3f vertex = Eigen::Vector3f::Zero(); Eigen::Vector3f vertex_color = Eigen::Vector3f::Zero(); return thrust::make_tuple(index, cube_index, i, vertex, vertex_color); } } }; __constant__ int vert_table[3] = {0, 2, 1}; struct extract_mesh_phase3_functor { extract_mesh_phase3_functor(const int *cube_index, const int *vert_no, const int *key_index, Eigen::Vector3i *triangles) : cube_index_(cube_index), vert_no_(vert_no), key_index_(key_index), triangles_(triangles) {}; const int *cube_index_; const int *vert_no_; const int *key_index_; Eigen::Vector3i *triangles_; __device__ void operator()(size_t idx) { const int kindx0 = key_index_[idx]; const int kindx1 = key_index_[idx + 1]; for (int j = kindx0; j < kindx1; ++j) { const int cindx = cube_index_[j]; for (int i = 0; tri_table[cindx][i] != -1; ++i) { const int tri_idx = tri_table[cindx][i]; for (int l = kindx0; l < kindx1; ++l) { if (vert_no_[l] == tri_idx) { triangles_[idx * 4 + i / 3][vert_table[i % 3]] = l; } } } } } }; struct extract_voxel_pointcloud_functor { extract_voxel_pointcloud_functor(const Eigen::Vector3f &origin, int resolution, float voxel_length) : origin_(origin), resolution_(resolution), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length){}; const Eigen::Vector3f origin_; const int resolution_; const float voxel_length_; const float half_voxel_length_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator()( const geometry::TSDFVoxel& v) { int x = v.grid_index_[0]; int y = 
v.grid_index_[1]; int z = v.grid_index_[2]; Eigen::Vector3f pt(half_voxel_length_ + voxel_length_ * x, half_voxel_length_ + voxel_length_ * y, half_voxel_length_ + voxel_length_ * z); if (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f) { float c = (v.tsdf_ + 1.0) * 0.5; return thrust::make_tuple(pt + origin_, Eigen::Vector3f(c, c, c)); } return thrust::make_tuple( Eigen::Vector3f(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN()), Eigen::Vector3f(std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN(), std::numeric_limits<float>::quiet_NaN())); } }; struct extract_voxel_grid_functor { extract_voxel_grid_functor(int resolution) : resolution_(resolution){}; const int resolution_; __device__ thrust::tuple<Eigen::Vector3i, geometry::Voxel> operator()( const geometry::TSDFVoxel& v) { const float w = v.weight_; const float f = v.tsdf_; if (w != 0.0f && f < 0.98f && f >= -0.98f) { float c = (f + 1.0) * 0.5; return thrust::make_tuple(v.grid_index_, geometry::Voxel(v.grid_index_, Eigen::Vector3f(c, c, c))); } return thrust::make_tuple(Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX), geometry::Voxel()); } }; struct integrate_functor { integrate_functor(const Eigen::Vector3f &origin, float fx, float fy, float cx, float cy, const Eigen::Matrix4f &extrinsic, float voxel_length, float sdf_trunc, float safe_width, float safe_height, int resolution, const uint8_t *color, const uint8_t *depth, const uint8_t *depth_to_camera_distance_multiplier, int width, int num_of_channels, TSDFVolumeColorType color_type, geometry::TSDFVoxel *voxels) : origin_(origin), fx_(fx), fy_(fy), cx_(cx), cy_(cy), extrinsic_(extrinsic), voxel_length_(voxel_length), half_voxel_length_(0.5 * voxel_length), sdf_trunc_(sdf_trunc), sdf_trunc_inv_(1.0 / sdf_trunc), extrinsic_scaled_(voxel_length * extrinsic), safe_width_(safe_width), safe_height_(safe_height), resolution_(resolution), color_(color), depth_(depth), depth_to_camera_distance_multiplier_( depth_to_camera_distance_multiplier), width_(width), num_of_channels_(num_of_channels), color_type_(color_type), voxels_(voxels){}; const Eigen::Vector3f origin_; const float fx_; const float fy_; const float cx_; const float cy_; const Eigen::Matrix4f extrinsic_; const float voxel_length_; const float half_voxel_length_; const float sdf_trunc_; const float sdf_trunc_inv_; const Eigen::Matrix4f extrinsic_scaled_; const float safe_width_; const float safe_height_; const int resolution_; const uint8_t *color_; const uint8_t *depth_; const uint8_t *depth_to_camera_distance_multiplier_; const int width_; const int num_of_channels_; const TSDFVolumeColorType color_type_; geometry::TSDFVoxel *voxels_; __device__ void operator()(size_t idx) { int res2 = resolution_ * resolution_; int x = idx / res2; int yz = idx % res2; int y = yz / resolution_; int z = yz % resolution_; voxels_[idx].grid_index_ = Eigen::Vector3i(x, y, z); Eigen::Vector4f pt_3d_homo( float(half_voxel_length_ + voxel_length_ * x + origin_(0)), float(half_voxel_length_ + voxel_length_ * y + origin_(1)), float(half_voxel_length_ + origin_(2)), 1.f); Eigen::Vector4f pt_camera = extrinsic_ * pt_3d_homo; pt_camera(0) += z * extrinsic_scaled_(0, 2); pt_camera(1) += z * extrinsic_scaled_(1, 2); pt_camera(2) += z * extrinsic_scaled_(2, 2); // Skip if negative depth after projection if (pt_camera(2) <= 0) { return; } // Skip if x-y coordinate not in range float u_f = 
pt_camera(0) * fx_ / pt_camera(2) + cx_ + 0.5f; float v_f = pt_camera(1) * fy_ / pt_camera(2) + cy_ + 0.5f; if (!(u_f >= 0.0001f && u_f < safe_width_ && v_f >= 0.0001f && v_f < safe_height_)) { return; } // Skip if negative depth in depth image int u = (int)u_f; int v = (int)v_f; float d = *geometry::PointerAt<float>(depth_, width_, u, v); if (d <= 0.0f) { return; } float sdf = (d - pt_camera(2)) * (*geometry::PointerAt<float>( depth_to_camera_distance_multiplier_, width_, u, v)); if (sdf > -sdf_trunc_) { // integrate float tsdf = min(1.0f, sdf * sdf_trunc_inv_); const geometry::TSDFVoxel voxel = voxels_[idx]; voxels_[idx].tsdf_ = (voxel.tsdf_ * voxel.weight_ + tsdf) / (voxel.weight_ + 1.0f); if (color_type_ == TSDFVolumeColorType::RGB8) { const uint8_t *rgb = geometry::PointerAt<uint8_t>( color_, width_, num_of_channels_, u, v, 0); Eigen::Vector3f rgb_f(rgb[0], rgb[1], rgb[2]); voxels_[idx].color_ = (voxel.color_ * voxel.weight_ + rgb_f) / (voxel.weight_ + 1.0f); } else if (color_type_ == TSDFVolumeColorType::Gray32) { const float intensity = *geometry::PointerAt<float>( color_, width_, num_of_channels_, u, v, 0); voxels_[idx].color_ = (voxel.color_.array() * voxel.weight_ + intensity) / (voxel.weight_ + 1.0f); } voxels_[idx].weight_ += 1.0f; } } }; } // namespace UniformTSDFVolume::UniformTSDFVolume( float length, int resolution, float sdf_trunc, TSDFVolumeColorType color_type, const Eigen::Vector3f &origin /* = Eigen::Vector3f::Zero()*/) : TSDFVolume(length / (float)resolution, sdf_trunc, color_type), origin_(origin), length_(length), resolution_(resolution), voxel_num_(resolution * resolution * resolution) { voxels_.resize(voxel_num_); } UniformTSDFVolume::~UniformTSDFVolume() {} UniformTSDFVolume::UniformTSDFVolume(const UniformTSDFVolume &other) : TSDFVolume(other), voxels_(other.voxels_), origin_(other.origin_), length_(other.length_), resolution_(other.resolution_), voxel_num_(other.voxel_num_) {} void UniformTSDFVolume::Reset() { voxels_.clear(); } void UniformTSDFVolume::Integrate( const geometry::RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic) { // This function goes through the voxels, and scan convert the relative // depth/color value into the voxel. // The following implementation is a highly optimized version. 
if ((image.depth_.num_of_channels_ != 1) || (image.depth_.bytes_per_channel_ != 4) || (image.depth_.width_ != intrinsic.width_) || (image.depth_.height_ != intrinsic.height_) || (color_type_ == TSDFVolumeColorType::RGB8 && image.color_.num_of_channels_ != 3) || (color_type_ == TSDFVolumeColorType::RGB8 && image.color_.bytes_per_channel_ != 1) || (color_type_ == TSDFVolumeColorType::Gray32 && image.color_.num_of_channels_ != 1) || (color_type_ == TSDFVolumeColorType::Gray32 && image.color_.bytes_per_channel_ != 4) || (color_type_ != TSDFVolumeColorType::NoColor && image.color_.width_ != intrinsic.width_) || (color_type_ != TSDFVolumeColorType::NoColor && image.color_.height_ != intrinsic.height_)) { utility::LogError( "[UniformTSDFVolume::Integrate] Unsupported image format."); } auto depth2cameradistance = geometry::Image::CreateDepthToCameraDistanceMultiplierFloatImage( intrinsic); IntegrateWithDepthToCameraDistanceMultiplier(image, intrinsic, extrinsic, *depth2cameradistance); } std::shared_ptr<geometry::PointCloud> UniformTSDFVolume::ExtractPointCloud() { auto pointcloud = std::make_shared<geometry::PointCloud>(); size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), [] __device__ (const geometry::TSDFVoxel& v) { return (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f); }); extract_pointcloud_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, voxel_length_, origin_, color_type_); pointcloud->points_.resize(n_valid_voxels); pointcloud->normals_.resize(n_valid_voxels); pointcloud->colors_.resize(n_valid_voxels); size_t n_total = (resolution_ - 2) * (resolution_ - 2) * (resolution_ - 2) * 3; auto begin = make_tuple_begin(pointcloud->points_, pointcloud->normals_, pointcloud->colors_); auto end_p = thrust::copy_if(thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func), thrust::make_transform_iterator(thrust::make_counting_iterator(n_total), func), begin, [] __device__ (const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector3f>& x) { const Eigen::Vector3f& pt = thrust::get<0>(x); return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2))); }); resize_all(thrust::distance(begin, end_p), pointcloud->points_, pointcloud->normals_, pointcloud->colors_); if (color_type_ == TSDFVolumeColorType::NoColor) pointcloud->colors_.clear(); return pointcloud; } std::shared_ptr<geometry::TriangleMesh> UniformTSDFVolume::ExtractTriangleMesh() { // implementation of marching cubes, based on // http://paulbourke.net/geometry/polygonise/ auto mesh = std::make_shared<geometry::TriangleMesh>(); size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), count_valid_voxels_functor(thrust::raw_pointer_cast(voxels_.data()), resolution_)); size_t res3 = (resolution_ - 1) * (resolution_ - 1) * (resolution_ - 1); // compute cube indices for each voxels utility::device_vector<Eigen::Vector3i> keys(n_valid_voxels); utility::device_vector<int> cube_indices(n_valid_voxels); extract_mesh_phase0_functor func0(thrust::raw_pointer_cast(voxels_.data()), resolution_); thrust::copy_if( thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func0), thrust::make_transform_iterator(thrust::make_counting_iterator(res3), func0), make_tuple_begin(keys, cube_indices), [] __device__ (const thrust::tuple<Eigen::Vector3i, int>& x) { return thrust::get<1>(x) >= 0; }); auto check_fn = [] __device__(const thrust::tuple<Eigen::Vector3i, int> &x) -> bool { int cidx = thrust::get<1>(x); return (cidx <= 0 || cidx >= 255); }; size_t 
n_result1 = remove_if_vectors(check_fn, keys, cube_indices);
    utility::device_vector<float> fs(n_result1 * 8);
    utility::device_vector<Eigen::Vector3f> cs(n_result1 * 8);
    extract_mesh_phase1_functor func1(thrust::raw_pointer_cast(voxels_.data()),
                                      thrust::raw_pointer_cast(keys.data()),
                                      resolution_, color_type_);
    thrust::transform(thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator(n_result1 * 8),
                      make_tuple_begin(fs, cs), func1);
    // compute vertices and vertex_colors
    int* ci_p = thrust::raw_pointer_cast(cube_indices.data());
    size_t n_valid_cubes = thrust::count_if(
            thrust::make_counting_iterator<size_t>(0),
            thrust::make_counting_iterator(n_result1 * 12),
            [ci_p] __device__ (size_t idx) {
                int i = idx / 12;
                int j = idx % 12;
                return (edge_table[ci_p[i]] & (1 << j)) > 0;
            });
    resize_all(n_valid_cubes, mesh->vertices_, mesh->vertex_colors_);
    utility::device_vector<Eigen::Vector3i> repeat_keys(n_valid_cubes);
    utility::device_vector<int> repeat_cube_indices(n_valid_cubes);
    utility::device_vector<int> vert_no(n_valid_cubes);
    extract_mesh_phase2_functor func2(thrust::raw_pointer_cast(keys.data()),
                                      thrust::raw_pointer_cast(cube_indices.data()),
                                      origin_, resolution_, voxel_length_,
                                      thrust::raw_pointer_cast(fs.data()),
                                      thrust::raw_pointer_cast(cs.data()),
                                      color_type_);
    thrust::copy_if(
            thrust::make_transform_iterator(thrust::make_counting_iterator<size_t>(0), func2),
            thrust::make_transform_iterator(thrust::make_counting_iterator(n_result1 * 12), func2),
            make_tuple_begin(repeat_keys, repeat_cube_indices, vert_no,
                             mesh->vertices_, mesh->vertex_colors_),
            [] __device__ (const thrust::tuple<Eigen::Vector3i, int, int, Eigen::Vector3f, Eigen::Vector3f>& x) {
                return thrust::get<0>(x)[0] >= 0;
            });
    // compute triangles
    utility::device_vector<int> vt_offsets(n_valid_cubes + 1, 0);
    auto end2 = thrust::reduce_by_key(repeat_keys.begin(), repeat_keys.end(),
                                      thrust::make_constant_iterator<int>(1),
                                      thrust::make_discard_iterator(),
                                      vt_offsets.begin());
    size_t n_result2 = thrust::distance(vt_offsets.begin(), end2.second);
    vt_offsets.resize(n_result2 + 1);
    thrust::exclusive_scan(vt_offsets.begin(), vt_offsets.end(), vt_offsets.begin());
    mesh->triangles_.resize(n_result2 * 4, Eigen::Vector3i(-1, -1, -1));
    extract_mesh_phase3_functor func3(
            thrust::raw_pointer_cast(repeat_cube_indices.data()),
            thrust::raw_pointer_cast(vert_no.data()),
            thrust::raw_pointer_cast(vt_offsets.data()),
            thrust::raw_pointer_cast(mesh->triangles_.data()));
    thrust::for_each(thrust::make_counting_iterator<size_t>(0),
                     thrust::make_counting_iterator(n_result2), func3);
    auto end3 = thrust::remove_if(
            mesh->triangles_.begin(), mesh->triangles_.end(),
            [] __device__(const Eigen::Vector3i &idxs) { return idxs[0] < 0; });
    mesh->triangles_.resize(thrust::distance(mesh->triangles_.begin(), end3));
    return mesh;
}

std::shared_ptr<geometry::PointCloud> UniformTSDFVolume::ExtractVoxelPointCloud() const {
    auto voxel = std::make_shared<geometry::PointCloud>();
    // const float *p_tsdf = (const float *)tsdf_.data();
    // const float *p_weight = (const float *)weight_.data();
    // const float *p_color = (const float *)color_.data();
    size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(),
            [] __device__ (const geometry::TSDFVoxel& v) {
                return (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f);
            });
    extract_voxel_pointcloud_functor func(origin_, resolution_, voxel_length_);
    resize_all(n_valid_voxels, voxel->points_, voxel->colors_);
    thrust::copy_if(
            thrust::make_transform_iterator(voxels_.begin(), func),
thrust::make_transform_iterator(voxels_.end(), func), make_tuple_begin(voxel->points_, voxel->colors_), [] __device__ (const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f>& x) { const Eigen::Vector3f& pt = thrust::get<0>(x); return !(isnan(pt(0)) || isnan(pt(1)) || isnan(pt(2))); }); voxel->RemoveNoneFinitePoints(true, false); return voxel; } std::shared_ptr<geometry::VoxelGrid> UniformTSDFVolume::ExtractVoxelGrid() const { auto voxel_grid = std::make_shared<geometry::VoxelGrid>(); voxel_grid->voxel_size_ = voxel_length_; voxel_grid->origin_ = origin_; size_t n_valid_voxels = thrust::count_if(voxels_.begin(), voxels_.end(), [] __device__ (const geometry::TSDFVoxel& v) { return (v.weight_ != 0.0f && v.tsdf_ < 0.98f && v.tsdf_ >= -0.98f); }); resize_all(n_valid_voxels, voxel_grid->voxels_keys_, voxel_grid->voxels_values_); extract_voxel_grid_functor func(resolution_); thrust::copy_if(thrust::make_transform_iterator(voxels_.begin(), func), thrust::make_transform_iterator(voxels_.end(), func), make_tuple_begin(voxel_grid->voxels_keys_, voxel_grid->voxels_values_), [] __device__ (const thrust::tuple<Eigen::Vector3i, geometry::Voxel>& x) { return thrust::get<0>(x) != Eigen::Vector3i(geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX, geometry::INVALID_VOXEL_INDEX); }); return voxel_grid; } void UniformTSDFVolume::IntegrateWithDepthToCameraDistanceMultiplier( const geometry::RGBDImage &image, const camera::PinholeCameraIntrinsic &intrinsic, const Eigen::Matrix4f &extrinsic, const geometry::Image &depth_to_camera_distance_multiplier) { const float fx = intrinsic.GetFocalLength().first; const float fy = intrinsic.GetFocalLength().second; const float cx = intrinsic.GetPrincipalPoint().first; const float cy = intrinsic.GetPrincipalPoint().second; const float safe_width = intrinsic.width_ - 0.0001f; const float safe_height = intrinsic.height_ - 0.0001f; voxels_.resize(voxel_num_); integrate_functor func( origin_, fx, fy, cx, cy, extrinsic, voxel_length_, sdf_trunc_, safe_width, safe_height, resolution_, thrust::raw_pointer_cast(image.color_.data_.data()), thrust::raw_pointer_cast(image.depth_.data_.data()), thrust::raw_pointer_cast( depth_to_camera_distance_multiplier.data_.data()), image.depth_.width_, image.color_.num_of_channels_, color_type_, thrust::raw_pointer_cast(voxels_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>( resolution_ * resolution_ * resolution_), func); }
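The TSDF code above is driven entirely from host code. Below is a minimal host-side usage sketch; it assumes a cupoch-style API (the `cupoch` namespace, header paths, the PrimeSenseDefault intrinsic preset, and `RGBDImage::CreateFromColorAndDepth` are assumptions taken from the Open3D-like conventions visible above, not confirmed by this file), and the input images are placeholders.

// Hypothetical usage sketch, not part of the original file.
#include "cupoch/camera/pinhole_camera_intrinsic.h"      // assumed header path
#include "cupoch/geometry/rgbdimage.h"                   // assumed header path
#include "cupoch/integration/uniform_tsdfvolume.h"       // assumed header path

using namespace cupoch;                                   // assumed enclosing namespace

int main() {
    camera::PinholeCameraIntrinsic intrinsic(
            camera::PinholeCameraIntrinsicParameters::PrimeSenseDefault);  // assumed preset
    integration::UniformTSDFVolume volume(
            /*length=*/4.0f, /*resolution=*/512, /*sdf_trunc=*/0.04f,
            integration::TSDFVolumeColorType::RGB8);

    // One frame: 8-bit 3-channel color and 32-bit float depth, both matching
    // the intrinsic's width/height. Assume they were filled elsewhere.
    geometry::Image color, depth;
    auto rgbd = geometry::RGBDImage::CreateFromColorAndDepth(
            color, depth, /*depth_scale=*/1000.0f, /*depth_trunc=*/3.0f,
            /*convert_rgb_to_intensity=*/false);          // assumed helper
    Eigen::Matrix4f extrinsic = Eigen::Matrix4f::Identity();
    volume.Integrate(*rgbd, intrinsic, extrinsic);

    auto mesh = volume.ExtractTriangleMesh();             // marching cubes, as above
    auto cloud = volume.ExtractPointCloud();
    return 0;
}

Note that Integrate() rejects the frame unless the depth image is single-channel 32-bit float and both images match the intrinsic's width and height, per the checks at the top of Integrate() above.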
28c33944e6f68588fa6d9eecf95af9b8fe44a25c.hip
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime_api.h>
#include <stdint.h>

#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })

__global__ void inner_prod_blockreduce_batch_kernel(
    const float *xs,
    int len,
    int batch_size,
    const float *ws,
    float alpha,
    float *sum)
{
  __shared__ float cache[1024 + 32];
  int tid = threadIdx.x;
  int block = blockIdx.x;
  int i = tid + block * len;
  if (tid < len && block < batch_size) {
    cache[OFFSET_BANK(tid)] = ws[i] * xs[i];
  } else {
    cache[OFFSET_BANK(tid)] = 0.0f;
  }
  __syncthreads();
  for (int s = 1; s < blockDim.x; s *= 2) {
    if (tid % (2*s) == 0 && (tid + s) < len) {
      cache[OFFSET_BANK(tid)] += cache[OFFSET_BANK(tid + s)];
    }
    __syncthreads();
  }
  if (tid == 0) {
    if (alpha != 0.0f) {
      float sum_0 = sum[block];
      sum[block] = alpha * sum_0 + cache[0];
    } else {
      sum[block] = cache[0];
    }
  }
}

extern "C" void rembrandt_kernel_inner_prod_blockreduce_batch(
    const float *xs,
    int len,
    int batch_size,
    const float *ws,
    float alpha,
    float *sum,
    hipStream_t stream)
{
  // XXX: assert(len <= 1024);
  // FIXME(20151022): could make more efficient use of blocks but w/e.
  int n = batch_size * 1024;
  hipLaunchKernelGGL(( inner_prod_blockreduce_batch_kernel), dim3((n+1024-1)/1024), dim3(1024), 0, stream,
      xs, len, batch_size, ws, alpha, sum);
  CUDA_POST_KERNEL_CHECK;
}
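The OFFSET_BANK macro used throughout this kernel pads each shared-memory index by one extra slot per 32 elements, which is why the cache array is sized 1024 + 32: indices that are multiples of 32 no longer collapse onto the same shared-memory bank during the strided reduction. A small standalone illustration of the mapping follows; it is not part of either file and uses a simplified, single-evaluation form of the macro.

// Hypothetical illustration of the bank-offset index mapping.
#include <stdio.h>

#define OFFSET_BANK(idx) ((idx) + ((idx) / 32))

int main(void) {
  for (int idx = 0; idx < 1024; idx += 128) {
    int padded = OFFSET_BANK(idx);
    // Without padding every one of these indices would hit bank 0.
    printf("idx %4d -> padded %4d (bank %2d instead of %2d)\n",
           idx, padded, padded % 32, idx % 32);
  }
  return 0;
}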
28c33944e6f68588fa6d9eecf95af9b8fe44a25c.cu
#include "common.h"
#include <cuda_runtime_api.h>
#include <stdint.h>

#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })

__global__ void inner_prod_blockreduce_batch_kernel(
    const float *xs,
    int len,
    int batch_size,
    const float *ws,
    float alpha,
    float *sum)
{
  __shared__ float cache[1024 + 32];
  int tid = threadIdx.x;
  int block = blockIdx.x;
  int i = tid + block * len;
  if (tid < len && block < batch_size) {
    cache[OFFSET_BANK(tid)] = ws[i] * xs[i];
  } else {
    cache[OFFSET_BANK(tid)] = 0.0f;
  }
  __syncthreads();
  for (int s = 1; s < blockDim.x; s *= 2) {
    if (tid % (2*s) == 0 && (tid + s) < len) {
      cache[OFFSET_BANK(tid)] += cache[OFFSET_BANK(tid + s)];
    }
    __syncthreads();
  }
  if (tid == 0) {
    if (alpha != 0.0f) {
      float sum_0 = sum[block];
      sum[block] = alpha * sum_0 + cache[0];
    } else {
      sum[block] = cache[0];
    }
  }
}

extern "C" void rembrandt_kernel_inner_prod_blockreduce_batch(
    const float *xs,
    int len,
    int batch_size,
    const float *ws,
    float alpha,
    float *sum,
    cudaStream_t stream)
{
  // XXX: assert(len <= 1024);
  // FIXME(20151022): could make more efficient use of blocks but w/e.
  int n = batch_size * 1024;
  inner_prod_blockreduce_batch_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
      xs, len, batch_size, ws, alpha, sum);
  CUDA_POST_KERNEL_CHECK;
}
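For completeness, a minimal host-side driver for the CUDA wrapper above is sketched below. It is a hypothetical example, not part of the original project: it assumes the .cu file is compiled and linked in, relies only on the extern "C" signature shown, and checks the per-batch dot products of ws and xs on the host.

// Hypothetical driver sketch, assuming the kernel object file above is linked in.
#include <cuda_runtime.h>
#include <stdio.h>
#include <vector>

extern "C" void rembrandt_kernel_inner_prod_blockreduce_batch(
    const float *xs, int len, int batch_size, const float *ws,
    float alpha, float *sum, cudaStream_t stream);

int main() {
  const int len = 256;        // must be <= 1024: one block reduces one batch row
  const int batch_size = 4;
  std::vector<float> h_xs(len * batch_size, 1.0f);
  std::vector<float> h_ws(len * batch_size, 0.5f);
  std::vector<float> h_sum(batch_size, 0.0f);

  float *d_xs, *d_ws, *d_sum;
  cudaMalloc((void **)&d_xs, h_xs.size() * sizeof(float));
  cudaMalloc((void **)&d_ws, h_ws.size() * sizeof(float));
  cudaMalloc((void **)&d_sum, h_sum.size() * sizeof(float));
  cudaMemcpy(d_xs, h_xs.data(), h_xs.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_ws, h_ws.data(), h_ws.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_sum, h_sum.data(), h_sum.size() * sizeof(float), cudaMemcpyHostToDevice);

  // alpha == 0 overwrites sum[b]; a non-zero alpha accumulates alpha*sum[b] + dot.
  rembrandt_kernel_inner_prod_blockreduce_batch(d_xs, len, batch_size, d_ws,
                                                0.0f, d_sum, 0);
  cudaDeviceSynchronize();
  cudaMemcpy(h_sum.data(), d_sum, h_sum.size() * sizeof(float), cudaMemcpyDeviceToHost);

  for (int b = 0; b < batch_size; ++b) {
    printf("batch %d: %f (expected %f)\n", b, h_sum[b], len * 1.0f * 0.5f);
  }
  cudaFree(d_xs); cudaFree(d_ws); cudaFree(d_sum);
  return 0;
}

The launch always uses 1024 threads per block and one block per batch row, so rows longer than 1024 elements would only have their first 1024 entries reduced; that is the limitation the commented-out assert(len <= 1024) in the wrapper warns about.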