Columns (all string-valued; minimum and maximum value lengths shown):
  hip_filename    5 to 84 characters
  hip_content     79 to 9.69M characters
  cuda_filename   4 to 83 characters
  cuda_content    19 to 9.69M characters
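Each row below pairs a hipify-generated HIP source (hip_filename, hip_content) with the original CUDA source it was produced from (cuda_filename, cuda_content); the two sides differ almost entirely in mechanical renames such as cudaMalloc -> hipMalloc, cudaMemcpy / cudaMemcpyHostToDevice -> hipMemcpy / hipMemcpyHostToDevice, and kernel<<<grid, block>>>(args) -> hipLaunchKernelGGL(kernel, grid, block, 0, 0, args). The following is a minimal, self-contained sketch of those substitutions; it is not taken from any row, and the kernel and variable names are invented for illustration, with the corresponding CUDA spelling noted in the comments.

    #include <hip/hip_runtime.h>                                      // was: #include <cuda_runtime.h>
    #include <cstdio>

    __global__ void scale(float *x, int n) {                          // kernel bodies are left unchanged by hipify
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) x[i] *= 2.0f;
    }

    int main() {
        const int n = 256;
        float h[n];
        for (int i = 0; i < n; i++) h[i] = 1.0f;

        float *d;
        hipMalloc((void**)&d, n * sizeof(float));                     // was: cudaMalloc
        hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);    // was: cudaMemcpy(..., cudaMemcpyHostToDevice)
        hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, d, n);      // was: scale<<<1, n>>>(d, n)
        hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);    // was: cudaMemcpy(..., cudaMemcpyDeviceToHost)
        hipFree(d);                                                   // was: cudaFree
        printf("h[0] = %f\n", h[0]);                                  // expected: 2.000000
        return 0;
    }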
ae038c519573d992f41897cfa8f1a913b462ff1b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* change dimension size as needed */ const int dimension = 4096 ; const int blocksize = 64; const int K = 4; const int tilewidth = 16 ; struct timeval tv; __global__ void gpuSmMM( float *Ad , float *Bd , float *Cd , int dimention ) { //Taking shared array to break the MAtrix in Tile widht and fatch them in that array per ele __shared__ float Ads [tilewidth][tilewidth] ; __shared__ float Bds [tilewidth][tilewidth] ; // calculate thread id unsigned int col = tilewidth*blockIdx.x + threadIdx.x ; unsigned int row = tilewidth*blockIdx.y + threadIdx.y ; for (int m = 0 ; m<dimention/tilewidth ; m++ ) // m indicate number of phase { Ads[threadIdx.y][threadIdx.x] = Ad[row*dimention + (m*tilewidth + threadIdx.x)] ; Bds[threadIdx.y][threadIdx.x] = Bd[ ( m*tilewidth + threadIdx.y) * dimention + col] ; __syncthreads() ; // for syncronizeing the threads // Do for tile for ( int k1 = 0; k1<tilewidth ; k1++ ) Cd[row*dimention + col]+= Ads[threadIdx.x][k1] * Bds[k1][threadIdx.y] ; __syncthreads() ; // for syncronizeing the threads } } int main(int argc, char *argv[]) { hipEvent_t start_i, stop_i,start_mc_h2d, stop_mc_h2d,start_mc_d2h, stop_mc_d2h,start_pl, stop_pl; float time_i,time_mc_h2d,time_mc_d2h,time_pl; hipEventCreate(&start_i); hipEventCreate(&stop_i); hipEventCreate(&start_mc_h2d); hipEventCreate(&stop_mc_h2d); hipEventCreate(&start_mc_d2h); hipEventCreate(&stop_mc_d2h); hipEventCreate(&start_pl); hipEventCreate(&stop_pl); int i, j; float *A, *B, *C;// start, end; float *Ad, *Bd, *Cd; hipEventRecord( start_i, 0 ); A = (float*)malloc(dimension*dimension*sizeof(float)); B = (float*)malloc(dimension*dimension*sizeof(float)); C = (float*)malloc(dimension*dimension*sizeof(float)); srand(292); for(i = 0; i < dimension; i++) for(j = 0; j < dimension; j++) { A[dimension*i+j] = (rand()/(RAND_MAX + 1.0)); B[dimension*i+j] = (rand()/(RAND_MAX + 1.0)); C[dimension*i+j] = 0.0; } hipEventRecord( stop_i, 0 ); hipEventSynchronize( stop_i ); hipEventElapsedTime( &time_i, start_i, stop_i ); hipEventRecord( start_mc_h2d, 0 ); hipMalloc( (void**)&Ad, dimension*dimension*sizeof(float) ); hipMemcpy( Ad, A, dimension*dimension*sizeof(float), hipMemcpyHostToDevice ); hipMalloc( (void**)&Bd, dimension*dimension*sizeof(float) ); hipMemcpy( Bd, B, dimension*dimension*sizeof(float), hipMemcpyHostToDevice ); hipMalloc( (void**)&Cd, dimension*dimension*sizeof(float) ); hipEventRecord( stop_mc_h2d, 0 ); hipEventSynchronize( stop_mc_h2d ); hipEventElapsedTime( &time_mc_h2d, start_mc_h2d, stop_mc_h2d ); //start = timestamp(); hipEventRecord( start_pl, 0 ); dim3 threadBlock(blocksize,blocksize); dim3 grid(K,K); hipLaunchKernelGGL(( gpuSmMM), dim3(grid),dim3(threadBlock), 0, 0, Ad,Bd,Cd,dimension); //end = timestamp(); hipEventRecord( stop_pl, 0 ); hipEventSynchronize( stop_pl ); hipEventElapsedTime( &time_pl, start_pl, stop_pl ); hipEventRecord( start_mc_d2h, 0 ); hipMemcpy(C,Cd,dimension*dimension*sizeof(float),hipMemcpyDeviceToHost); hipEventRecord( stop_mc_d2h, 0 ); hipEventSynchronize( stop_mc_d2h ); hipEventElapsedTime( &time_mc_d2h, start_mc_d2h, stop_mc_d2h ); printf("IT: %f ", time_i); printf("MC: %f ", ( time_mc_d2h + time_mc_h2d ) ); printf("PLT: %f ", time_pl); printf("T:%f .... 
\n\n", (time_pl + time_mc_d2h + time_mc_h2d+time_i)); //printf("\nsecs:%f\n", end-start); hipEventDestroy( start_i ); hipEventDestroy( stop_i ); hipEventDestroy( start_mc_d2h ); hipEventDestroy( stop_mc_d2h ); hipEventDestroy( start_mc_h2d ); hipEventDestroy( stop_mc_h2d ); hipEventDestroy( start_pl ); hipEventDestroy( stop_pl ); free(A); free(B); free(C); hipFree(Ad); hipFree(Bd); hipFree(Cd); return 0; }
ae038c519573d992f41897cfa8f1a913b462ff1b.cu
#include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* change dimension size as needed */ const int dimension = 4096 ; const int blocksize = 64; const int K = 4; const int tilewidth = 16 ; struct timeval tv; __global__ void gpuSmMM( float *Ad , float *Bd , float *Cd , int dimention ) { //Taking shared array to break the MAtrix in Tile widht and fatch them in that array per ele __shared__ float Ads [tilewidth][tilewidth] ; __shared__ float Bds [tilewidth][tilewidth] ; // calculate thread id unsigned int col = tilewidth*blockIdx.x + threadIdx.x ; unsigned int row = tilewidth*blockIdx.y + threadIdx.y ; for (int m = 0 ; m<dimention/tilewidth ; m++ ) // m indicate number of phase { Ads[threadIdx.y][threadIdx.x] = Ad[row*dimention + (m*tilewidth + threadIdx.x)] ; Bds[threadIdx.y][threadIdx.x] = Bd[ ( m*tilewidth + threadIdx.y) * dimention + col] ; __syncthreads() ; // for syncronizeing the threads // Do for tile for ( int k1 = 0; k1<tilewidth ; k1++ ) Cd[row*dimention + col]+= Ads[threadIdx.x][k1] * Bds[k1][threadIdx.y] ; __syncthreads() ; // for syncronizeing the threads } } int main(int argc, char *argv[]) { cudaEvent_t start_i, stop_i,start_mc_h2d, stop_mc_h2d,start_mc_d2h, stop_mc_d2h,start_pl, stop_pl; float time_i,time_mc_h2d,time_mc_d2h,time_pl; cudaEventCreate(&start_i); cudaEventCreate(&stop_i); cudaEventCreate(&start_mc_h2d); cudaEventCreate(&stop_mc_h2d); cudaEventCreate(&start_mc_d2h); cudaEventCreate(&stop_mc_d2h); cudaEventCreate(&start_pl); cudaEventCreate(&stop_pl); int i, j; float *A, *B, *C;// start, end; float *Ad, *Bd, *Cd; cudaEventRecord( start_i, 0 ); A = (float*)malloc(dimension*dimension*sizeof(float)); B = (float*)malloc(dimension*dimension*sizeof(float)); C = (float*)malloc(dimension*dimension*sizeof(float)); srand(292); for(i = 0; i < dimension; i++) for(j = 0; j < dimension; j++) { A[dimension*i+j] = (rand()/(RAND_MAX + 1.0)); B[dimension*i+j] = (rand()/(RAND_MAX + 1.0)); C[dimension*i+j] = 0.0; } cudaEventRecord( stop_i, 0 ); cudaEventSynchronize( stop_i ); cudaEventElapsedTime( &time_i, start_i, stop_i ); cudaEventRecord( start_mc_h2d, 0 ); cudaMalloc( (void**)&Ad, dimension*dimension*sizeof(float) ); cudaMemcpy( Ad, A, dimension*dimension*sizeof(float), cudaMemcpyHostToDevice ); cudaMalloc( (void**)&Bd, dimension*dimension*sizeof(float) ); cudaMemcpy( Bd, B, dimension*dimension*sizeof(float), cudaMemcpyHostToDevice ); cudaMalloc( (void**)&Cd, dimension*dimension*sizeof(float) ); cudaEventRecord( stop_mc_h2d, 0 ); cudaEventSynchronize( stop_mc_h2d ); cudaEventElapsedTime( &time_mc_h2d, start_mc_h2d, stop_mc_h2d ); //start = timestamp(); cudaEventRecord( start_pl, 0 ); dim3 threadBlock(blocksize,blocksize); dim3 grid(K,K); gpuSmMM<<<grid,threadBlock>>>( Ad,Bd,Cd,dimension); //end = timestamp(); cudaEventRecord( stop_pl, 0 ); cudaEventSynchronize( stop_pl ); cudaEventElapsedTime( &time_pl, start_pl, stop_pl ); cudaEventRecord( start_mc_d2h, 0 ); cudaMemcpy(C,Cd,dimension*dimension*sizeof(float),cudaMemcpyDeviceToHost); cudaEventRecord( stop_mc_d2h, 0 ); cudaEventSynchronize( stop_mc_d2h ); cudaEventElapsedTime( &time_mc_d2h, start_mc_d2h, stop_mc_d2h ); printf("IT: %f ", time_i); printf("MC: %f ", ( time_mc_d2h + time_mc_h2d ) ); printf("PLT: %f ", time_pl); printf("T:%f .... 
\n\n", (time_pl + time_mc_d2h + time_mc_h2d+time_i)); //printf("\nsecs:%f\n", end-start); cudaEventDestroy( start_i ); cudaEventDestroy( stop_i ); cudaEventDestroy( start_mc_d2h ); cudaEventDestroy( stop_mc_d2h ); cudaEventDestroy( start_mc_h2d ); cudaEventDestroy( stop_mc_h2d ); cudaEventDestroy( start_pl ); cudaEventDestroy( stop_pl ); free(A); free(B); free(C); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); return 0; }
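As written, the pair above cannot produce a correct product: the launch uses dim3(blocksize, blocksize) = 64 x 64 = 4096 threads per block (over the 1024-thread limit), the 4 x 4 grid with 16-wide tiles reaches only a small corner of the 4096 x 4096 matrices, threadIdx values up to 63 index the 16 x 16 shared tiles out of bounds, Cd is accumulated into without ever being zeroed on the device, and the inner product reads Ads[threadIdx.x][k1] * Bds[k1][threadIdx.y], transposed relative to how the tiles are loaded. A minimal corrected sketch of the standard tiled kernel and its launch, in CUDA syntax and assuming dimension is a multiple of the tile width:

    constexpr int TILE = 16;

    // Accumulate in a register and keep the shared-tile indices consistent with the loads.
    __global__ void gpuSmMM_fixed(const float *A, const float *B, float *C, int dimension) {
        __shared__ float As[TILE][TILE];
        __shared__ float Bs[TILE][TILE];

        unsigned int col = TILE * blockIdx.x + threadIdx.x;
        unsigned int row = TILE * blockIdx.y + threadIdx.y;
        float acc = 0.0f;

        for (int m = 0; m < dimension / TILE; m++) {
            As[threadIdx.y][threadIdx.x] = A[row * dimension + m * TILE + threadIdx.x];
            Bs[threadIdx.y][threadIdx.x] = B[(m * TILE + threadIdx.y) * dimension + col];
            __syncthreads();
            for (int k = 0; k < TILE; k++)
                acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];      // indices match the loads above
            __syncthreads();
        }
        C[row * dimension + col] = acc;                              // no device-side zeroing of C needed
    }

    // Launch: one TILE x TILE thread block per output tile.
    //   dim3 threadBlock(TILE, TILE);
    //   dim3 grid(dimension / TILE, dimension / TILE);
    //   gpuSmMM_fixed<<<grid, threadBlock>>>(Ad, Bd, Cd, dimension);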
487d2ae977d986854ce5d4d0b9aac21be4c255f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // $ nvcc -std=c++11 -I../.. graph_daxpy.cu -o graph_daxpy #include <cassert> #include <iostream> #include <chrono> #include <thrust/device_vector.h> __global__ void daxpy_kernel(int n, double a, const double* x, double* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void hello_world(int n, double a) { if(blockIdx.x == 0 and threadIdx.x == 0) { printf("Hello world!\n"); printf("n: %d\n", n); printf("a: %f\n", a); } } hipGraph_t make_daxpy_graph(int n, double a, const double* x, double* y) { hipGraph_t graph{}; if(hipError_t error = cudaGraphCreate(&graph, 0)) { throw std::runtime_error("make_daxpy_graph: CUDA error after cudaGraphCreate: " + std::string(hipGetErrorString(error))); } int block_size = 256; int num_blocks = (n + block_size - 1) / block_size; // introduce a kernel node void* kernel_params[] = {&n, &a, &x, &y}; cudaKernelNodeParams params { reinterpret_cast<void*>(daxpy_kernel), dim3(num_blocks), dim3(block_size), 0, kernel_params, nullptr }; hipGraphNode_t node{}; if(hipError_t error = cudaGraphAddKernelNode(&node, graph, nullptr, 0, &params)) { throw std::runtime_error("make_daxpy_graph: CUDA error after cudaGraphAddKernelNode: " + std::string(hipGetErrorString(error))); } return graph; } void test(size_t n) { thrust::device_vector<double> x(n, 1); thrust::device_vector<double> y(n, 2); double a = 2; // make the graph hipGraph_t graph = make_daxpy_graph(n, a, x.data().get(), y.data().get()); // instantiate the graph hipGraphExec_t executable_graph{}; if(hipError_t error = hipGraphInstantiate(&executable_graph, graph, nullptr, nullptr, 0)) { throw std::runtime_error("test: CUDA error after hipGraphInstantiate: " + std::string(hipGetErrorString(error))); } // create a stream hipStream_t stream{}; if(hipError_t error = hipStreamCreate(&stream)) { throw std::runtime_error("test: CUDA error after hipStreamCreate: " + std::string(hipGetErrorString(error))); } // launch the graph if(hipError_t error = hipGraphLaunch(executable_graph, stream)) { throw std::runtime_error("test: CUDA error after hipGraphLaunch: " + std::string(hipGetErrorString(error))); } // wait if(hipError_t error = hipStreamSynchronize(stream)) { throw std::runtime_error("test: CUDA error after hipStreamSynchronize: " + std::string(hipGetErrorString(error))); } // destroy resources if(hipError_t error = hipStreamDestroy(stream)) { throw std::runtime_error("test: CUDA error after hipStreamDestroy: " + std::string(hipGetErrorString(error))); } if(hipError_t error = hipGraphDestroy(graph)) { throw std::runtime_error("test: CUDA error after hipGraphDestroy: " + std::string(hipGetErrorString(error))); } if(hipError_t error = hipGraphExecDestroy(executable_graph)) { throw std::runtime_error("test: CUDA error after hipGraphExecDestroy: " + std::string(hipGetErrorString(error))); } // check the result thrust::device_vector<double> reference(n, 4); assert(reference == y); } double measure_bandwidth(size_t n, size_t num_trials = 100) { thrust::device_vector<double> x(n, 1); thrust::device_vector<double> y(n, 2); double a = 2; // make a stream hipStream_t stream; if(hipError_t error = hipStreamCreate(&stream)) { throw std::runtime_error("measure_bandwidth: CUDA error after hipStreamCreate: " + std::string(hipGetErrorString(error))); } // make the graph hipGraph_t graph = make_daxpy_graph(n, a, x.data().get(), y.data().get()); // instantiate the graph hipGraphExec_t executable_graph; 
if(hipError_t error = hipGraphInstantiate(&executable_graph, graph, nullptr, nullptr, 0)) { throw std::runtime_error("test: CUDA error after hipGraphInstantiate: " + std::string(hipGetErrorString(error))); } // time trials auto start = std::chrono::high_resolution_clock().now(); { for(size_t i = 0; i < num_trials; ++i) { hipGraphLaunch(executable_graph, stream); } if(hipError_t error = hipStreamSynchronize(stream)) { throw std::runtime_error("measure_bandwidth: CUDA error after hipStreamSynchronize: " + std::string(hipGetErrorString(error))); } } auto end = std::chrono::high_resolution_clock().now(); // compute mean GB/s size_t mean_nanoseconds = (std::chrono::duration_cast<std::chrono::nanoseconds>(end - start) / num_trials).count(); double mean_seconds = double(mean_nanoseconds) / 1000000000; size_t num_bytes = 2 * n * sizeof(double); double mean_bytes_per_second = double(num_bytes) / mean_seconds; double mean_gigabytes_per_second = mean_bytes_per_second / 1000000000; if(hipError_t error = hipGraphExecDestroy(executable_graph)) { throw std::runtime_error("measure_bandwidth: CUDA error after hipGraphExecDestroy: " + std::string(hipGetErrorString(error))); } if(hipError_t error = hipGraphDestroy(graph)) { throw std::runtime_error("measure_bandwidth: CUDA error after hipGraphDestroy: " + std::string(hipGetErrorString(error))); } if(hipError_t error = hipStreamDestroy(stream)) { throw std::runtime_error("measure_bandwidth: CUDA error after hipStreamDestroy: " + std::string(hipGetErrorString(error))); } return mean_gigabytes_per_second; } int main(int argc, char** argv) { size_t n = 1 << 25; if(argc > 1) { n = std::atoi(argv[1]); } // first test for correctness test(n); double bandwidth = measure_bandwidth(n); std::clog << n << ", " << bandwidth << std::endl; std::cout << "Graph DAXPY bandwidth: " << bandwidth << " GB/s" << std::endl; std::cout << "OK" << std::endl; return 0; }
487d2ae977d986854ce5d4d0b9aac21be4c255f6.cu
// $ nvcc -std=c++11 -I../.. graph_daxpy.cu -o graph_daxpy #include <cassert> #include <iostream> #include <chrono> #include <thrust/device_vector.h> __global__ void daxpy_kernel(int n, double a, const double* x, double* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { y[i] = a * x[i] + y[i]; } } __global__ void hello_world(int n, double a) { if(blockIdx.x == 0 and threadIdx.x == 0) { printf("Hello world!\n"); printf("n: %d\n", n); printf("a: %f\n", a); } } cudaGraph_t make_daxpy_graph(int n, double a, const double* x, double* y) { cudaGraph_t graph{}; if(cudaError_t error = cudaGraphCreate(&graph, 0)) { throw std::runtime_error("make_daxpy_graph: CUDA error after cudaGraphCreate: " + std::string(cudaGetErrorString(error))); } int block_size = 256; int num_blocks = (n + block_size - 1) / block_size; // introduce a kernel node void* kernel_params[] = {&n, &a, &x, &y}; cudaKernelNodeParams params { reinterpret_cast<void*>(daxpy_kernel), dim3(num_blocks), dim3(block_size), 0, kernel_params, nullptr }; cudaGraphNode_t node{}; if(cudaError_t error = cudaGraphAddKernelNode(&node, graph, nullptr, 0, &params)) { throw std::runtime_error("make_daxpy_graph: CUDA error after cudaGraphAddKernelNode: " + std::string(cudaGetErrorString(error))); } return graph; } void test(size_t n) { thrust::device_vector<double> x(n, 1); thrust::device_vector<double> y(n, 2); double a = 2; // make the graph cudaGraph_t graph = make_daxpy_graph(n, a, x.data().get(), y.data().get()); // instantiate the graph cudaGraphExec_t executable_graph{}; if(cudaError_t error = cudaGraphInstantiate(&executable_graph, graph, nullptr, nullptr, 0)) { throw std::runtime_error("test: CUDA error after cudaGraphInstantiate: " + std::string(cudaGetErrorString(error))); } // create a stream cudaStream_t stream{}; if(cudaError_t error = cudaStreamCreate(&stream)) { throw std::runtime_error("test: CUDA error after cudaStreamCreate: " + std::string(cudaGetErrorString(error))); } // launch the graph if(cudaError_t error = cudaGraphLaunch(executable_graph, stream)) { throw std::runtime_error("test: CUDA error after cudaGraphLaunch: " + std::string(cudaGetErrorString(error))); } // wait if(cudaError_t error = cudaStreamSynchronize(stream)) { throw std::runtime_error("test: CUDA error after cudaStreamSynchronize: " + std::string(cudaGetErrorString(error))); } // destroy resources if(cudaError_t error = cudaStreamDestroy(stream)) { throw std::runtime_error("test: CUDA error after cudaStreamDestroy: " + std::string(cudaGetErrorString(error))); } if(cudaError_t error = cudaGraphDestroy(graph)) { throw std::runtime_error("test: CUDA error after cudaGraphDestroy: " + std::string(cudaGetErrorString(error))); } if(cudaError_t error = cudaGraphExecDestroy(executable_graph)) { throw std::runtime_error("test: CUDA error after cudaGraphExecDestroy: " + std::string(cudaGetErrorString(error))); } // check the result thrust::device_vector<double> reference(n, 4); assert(reference == y); } double measure_bandwidth(size_t n, size_t num_trials = 100) { thrust::device_vector<double> x(n, 1); thrust::device_vector<double> y(n, 2); double a = 2; // make a stream cudaStream_t stream; if(cudaError_t error = cudaStreamCreate(&stream)) { throw std::runtime_error("measure_bandwidth: CUDA error after cudaStreamCreate: " + std::string(cudaGetErrorString(error))); } // make the graph cudaGraph_t graph = make_daxpy_graph(n, a, x.data().get(), y.data().get()); // instantiate the graph cudaGraphExec_t executable_graph; if(cudaError_t error = 
cudaGraphInstantiate(&executable_graph, graph, nullptr, nullptr, 0)) { throw std::runtime_error("test: CUDA error after cudaGraphInstantiate: " + std::string(cudaGetErrorString(error))); } // time trials auto start = std::chrono::high_resolution_clock().now(); { for(size_t i = 0; i < num_trials; ++i) { cudaGraphLaunch(executable_graph, stream); } if(cudaError_t error = cudaStreamSynchronize(stream)) { throw std::runtime_error("measure_bandwidth: CUDA error after cudaStreamSynchronize: " + std::string(cudaGetErrorString(error))); } } auto end = std::chrono::high_resolution_clock().now(); // compute mean GB/s size_t mean_nanoseconds = (std::chrono::duration_cast<std::chrono::nanoseconds>(end - start) / num_trials).count(); double mean_seconds = double(mean_nanoseconds) / 1000000000; size_t num_bytes = 2 * n * sizeof(double); double mean_bytes_per_second = double(num_bytes) / mean_seconds; double mean_gigabytes_per_second = mean_bytes_per_second / 1000000000; if(cudaError_t error = cudaGraphExecDestroy(executable_graph)) { throw std::runtime_error("measure_bandwidth: CUDA error after cudaGraphExecDestroy: " + std::string(cudaGetErrorString(error))); } if(cudaError_t error = cudaGraphDestroy(graph)) { throw std::runtime_error("measure_bandwidth: CUDA error after cudaGraphDestroy: " + std::string(cudaGetErrorString(error))); } if(cudaError_t error = cudaStreamDestroy(stream)) { throw std::runtime_error("measure_bandwidth: CUDA error after cudaStreamDestroy: " + std::string(cudaGetErrorString(error))); } return mean_gigabytes_per_second; } int main(int argc, char** argv) { size_t n = 1 << 25; if(argc > 1) { n = std::atoi(argv[1]); } // first test for correctness test(n); double bandwidth = measure_bandwidth(n); std::clog << n << ", " << bandwidth << std::endl; std::cout << "Graph DAXPY bandwidth: " << bandwidth << " GB/s" << std::endl; std::cout << "OK" << std::endl; return 0; }
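The pair above builds its one-kernel graph by filling in cudaKernelNodeParams by hand and calling cudaGraphAddKernelNode (calls which, notably, the hipify output leaves with their cuda prefixes). Stream capture is an alternative way to obtain the same single-kernel graph; a minimal sketch, with error checking omitted and assuming the same daxpy_kernel plus the raw device pointers x and y and the scalars n and a passed to make_daxpy_graph:

    // Capture an ordinary kernel launch into a graph instead of assembling node params manually.
    cudaStream_t capture_stream;
    cudaStreamCreate(&capture_stream);

    cudaStreamBeginCapture(capture_stream, cudaStreamCaptureModeGlobal);
    int block_size = 256;
    int num_blocks = (n + block_size - 1) / block_size;
    daxpy_kernel<<<num_blocks, block_size, 0, capture_stream>>>(n, a, x, y);   // recorded, not executed yet
    cudaGraph_t graph;
    cudaStreamEndCapture(capture_stream, &graph);

    cudaGraphExec_t executable_graph;
    cudaGraphInstantiate(&executable_graph, graph, nullptr, nullptr, 0);
    cudaGraphLaunch(executable_graph, capture_stream);
    cudaStreamSynchronize(capture_stream);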
a4bcbc38b146c67b4e1b776e2ade7e83cb4ed38a.hip
// !!! This is a file automatically generated by hipify!!! // -*- C++ -*- // -*- coding: utf-8 -*- // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // michael a.g. avzis // california institute of technology // (c) 1998-2010 all rights reserved // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // memxchng.cu #include <hip/hip_runtime.h> #include <assert.h> int main(int argc, char* argv[]) { const int N = 12; // allocate some buffers on the host float *send_host = (float *) malloc(N*sizeof(float)); float *recv_host = (float *) malloc(N*sizeof(float)); // allocate matching ones on the device float *send_device, *recv_device; hipMalloc((void **) &recv_device, N*sizeof(float)); hipMalloc((void **) &send_device, N*sizeof(float)); // and initialize the host data for (int i=0; i<N; i++) { send_host[i] = 2.0f + i*i; recv_host[i] = 0.0f; } // send the data from the host to the device hipMemcpy(recv_device, send_host, N*sizeof(float), hipMemcpyHostToDevice); // move the data in device memory hipMemcpy(send_device, recv_device, N*sizeof(float), hipMemcpyDeviceToDevice); // get it back on the host hipMemcpy(recv_host, send_device, N*sizeof(float), hipMemcpyDeviceToHost); // check the result for (int i=0; i<N; i++) { assert(send_host[i] == recv_host[i]); } // free the buffers; free(send_host); free(recv_host); hipFree(send_device); hipFree(recv_device); return 0; } // end of file
a4bcbc38b146c67b4e1b776e2ade7e83cb4ed38a.cu
// -*- C++ -*- // -*- coding: utf-8 -*- // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // michael a.g. aïvázis // california institute of technology // (c) 1998-2010 all rights reserved // // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // memxchng.cu #include <cuda.h> #include <assert.h> int main(int argc, char* argv[]) { const int N = 12; // allocate some buffers on the host float *send_host = (float *) malloc(N*sizeof(float)); float *recv_host = (float *) malloc(N*sizeof(float)); // allocate matching ones on the device float *send_device, *recv_device; cudaMalloc((void **) &recv_device, N*sizeof(float)); cudaMalloc((void **) &send_device, N*sizeof(float)); // and initialize the host data for (int i=0; i<N; i++) { send_host[i] = 2.0f + i*i; recv_host[i] = 0.0f; } // send the data from the host to the device cudaMemcpy(recv_device, send_host, N*sizeof(float), cudaMemcpyHostToDevice); // move the data in device memory cudaMemcpy(send_device, recv_device, N*sizeof(float), cudaMemcpyDeviceToDevice); // get it back on the host cudaMemcpy(recv_host, send_device, N*sizeof(float), cudaMemcpyDeviceToHost); // check the result for (int i=0; i<N; i++) { assert(send_host[i] == recv_host[i]); } // free the buffers; free(send_host); free(recv_host); cudaFree(send_device); cudaFree(recv_device); return 0; } // end of file
9464394f141d9997e41d081ea438be6ef41aea8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss = count==0? 
0 : loss / count; //loss /= count; } else { loss = loss / outer_num_; //loss /= outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), count==0? 0 : loss_weight / count, //HYQ bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
9464394f141d9997e41d081ea438be6ef41aea8f.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss = count==0? 
0 : loss / count; //loss /= count; } else { loss = loss / outer_num_; //loss /= outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), count==0? 0 : loss_weight / count, //HYQ bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
63300c99219fdcdbdb131362622d7bb31c4b0990.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // SkyNet Project // Copyright (C) 2018 by Contributors <https://github.com/Tyill/skynet> // // This code is licensed under the MIT License. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files(the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #include "../stdafx.h" #include "snOperatorCUDA/src/Operator/lossFunction.h" using namespace std; using namespace SN_Base; __global__ void softMaxACrossEntropyFwd(snSize iosz, snFloat* inout){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers inout += blockIdx.x * inStepByN; __shared__ int tmax; __shared__ snFloat tsumm; tmax = 0; tsumm = 0; __syncthreads(); unsigned int i = threadIdx.x; while (i < inStepByN){ atomicMax(&tmax, int(inout[i] * 100.F)); // TODO redo to reduction i += blockDim.x; } __syncthreads(); i = threadIdx.x; while (i < inStepByN){ inout[i] = ((inout[i] - tmax / 100.F) > -20) ? 
exp(inout[i] - tmax / 100.F) : 0.1E-8F; atomicAdd(&tsumm, inout[i]); // TODO redo to reduction i += blockDim.x; } __syncthreads(); i = threadIdx.x; while (i < inStepByN){ inout[i] /= tsumm; i += blockDim.x; } } __global__ void softMaxACrossEntropyBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers // gridDim.y - batch size grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; unsigned int i = threadIdx.x; while (i < inStepByD){ grad[i] = out[i] - targ[i]; i += blockDim.x; } } __global__ void binaryCrossEntropyBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers // gridDim.y - batch size grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; unsigned int i = threadIdx.x; while (i < inStepByD){ grad[i] = (out[i] - targ[i]) / (out[i] * (1.F - out[i])); i += blockDim.x; } } __global__ void regressionMSEBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers // gridDim.y - batch size grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; unsigned int i = threadIdx.x; while (i < inStepByD){ grad[i] = 2 * (out[i] - targ[i]) / inStepByN; i += blockDim.x; } } void lossForward(const snSize& sz, snFloat* inout, lossType loss){ dim3 dimBlock(256); dim3 dimGrid(int(sz.n)); switch (loss){ case lossType::softMaxACrossEntropy: hipLaunchKernelGGL(( softMaxACrossEntropyFwd) , dim3(dimGrid), dim3(dimBlock) , 0, 0, sz, inout); break; case lossType::binaryCrossEntropy: break; case lossType::regressionMSE: break; } } void lossBackward(const snSize& sz, snFloat* out, snFloat* targ, snFloat* grad, lossType loss){ dim3 dimBlock(128); dim3 dimGrid(int(sz.d), int(sz.n)); switch (loss){ case lossType::softMaxACrossEntropy: softMaxACrossEntropyBwd << <dimGrid, dimBlock >> >(sz, out, targ, grad); break; case lossType::binaryCrossEntropy: binaryCrossEntropyBwd << <dimGrid, dimBlock >> >(sz, out, targ, grad); break; case lossType::regressionMSE: // Mean Square Error regressionMSEBwd << <dimGrid, dimBlock >> >(sz, out, targ, grad); break; } }
63300c99219fdcdbdb131362622d7bb31c4b0990.cu
// // SkyNet Project // Copyright (C) 2018 by Contributors <https://github.com/Tyill/skynet> // // This code is licensed under the MIT License. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files(the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #include "../stdafx.h" #include "snOperatorCUDA/src/Operator/lossFunction.h" using namespace std; using namespace SN_Base; __global__ void softMaxACrossEntropyFwd(snSize iosz, snFloat* inout){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers inout += blockIdx.x * inStepByN; __shared__ int tmax; __shared__ snFloat tsumm; tmax = 0; tsumm = 0; __syncthreads(); unsigned int i = threadIdx.x; while (i < inStepByN){ atomicMax(&tmax, int(inout[i] * 100.F)); // TODO redo to reduction i += blockDim.x; } __syncthreads(); i = threadIdx.x; while (i < inStepByN){ inout[i] = ((inout[i] - tmax / 100.F) > -20) ? 
exp(inout[i] - tmax / 100.F) : 0.1E-8F; atomicAdd(&tsumm, inout[i]); // TODO redo to reduction i += blockDim.x; } __syncthreads(); i = threadIdx.x; while (i < inStepByN){ inout[i] /= tsumm; i += blockDim.x; } } __global__ void softMaxACrossEntropyBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers // gridDim.y - batch size grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; unsigned int i = threadIdx.x; while (i < inStepByD){ grad[i] = out[i] - targ[i]; i += blockDim.x; } } __global__ void binaryCrossEntropyBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers // gridDim.y - batch size grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; unsigned int i = threadIdx.x; while (i < inStepByD){ grad[i] = (out[i] - targ[i]) / (out[i] * (1.F - out[i])); i += blockDim.x; } } __global__ void regressionMSEBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ size_t inStepByD = iosz.w * iosz.h, // step out by input inStepByN = inStepByD * iosz.d; // step out by batch // gridDim.x - number of out layers // gridDim.y - batch size grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; unsigned int i = threadIdx.x; while (i < inStepByD){ grad[i] = 2 * (out[i] - targ[i]) / inStepByN; i += blockDim.x; } } void lossForward(const snSize& sz, snFloat* inout, lossType loss){ dim3 dimBlock(256); dim3 dimGrid(int(sz.n)); switch (loss){ case lossType::softMaxACrossEntropy: softMaxACrossEntropyFwd <<<dimGrid, dimBlock >>>(sz, inout); break; case lossType::binaryCrossEntropy: break; case lossType::regressionMSE: break; } } void lossBackward(const snSize& sz, snFloat* out, snFloat* targ, snFloat* grad, lossType loss){ dim3 dimBlock(128); dim3 dimGrid(int(sz.d), int(sz.n)); switch (loss){ case lossType::softMaxACrossEntropy: softMaxACrossEntropyBwd << <dimGrid, dimBlock >> >(sz, out, targ, grad); break; case lossType::binaryCrossEntropy: binaryCrossEntropyBwd << <dimGrid, dimBlock >> >(sz, out, targ, grad); break; case lossType::regressionMSE: // Mean Square Error regressionMSEBwd << <dimGrid, dimBlock >> >(sz, out, targ, grad); break; } }
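The two "TODO redo to reduction" comments in softMaxACrossEntropyFwd above refer to the per-block maximum and sum, which are currently computed with atomics on a single shared variable (the maximum additionally being squeezed through an int(inout[i] * 100.F) trick that quantizes the values). A minimal sketch of the shared-memory tree reduction those TODOs ask for, shown for the maximum only, written with plain float in place of snFloat, and assuming the 256-thread (power-of-two) blocks used by lossForward together with the same strided per-thread loop over inStepByN:

    #include <cfloat>   // FLT_MAX

    __global__ void blockMaxSketch(const float *inout, size_t inStepByN, float *blockMax) {
        __shared__ float smax[256];                      // one slot per thread; blockDim.x <= 256 assumed
        const float *data = inout + blockIdx.x * inStepByN;

        // Each thread scans its strided slice, as in the original kernel.
        float local = -FLT_MAX;
        for (size_t i = threadIdx.x; i < inStepByN; i += blockDim.x)
            local = fmaxf(local, data[i]);
        smax[threadIdx.x] = local;
        __syncthreads();

        // Pairwise tree reduction in shared memory.
        for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (threadIdx.x < s)
                smax[threadIdx.x] = fmaxf(smax[threadIdx.x], smax[threadIdx.x + s]);
            __syncthreads();
        }
        if (threadIdx.x == 0)
            blockMax[blockIdx.x] = smax[0];              // true float maximum, no int scaling needed
    }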
e4d25f6d3c86aeb57de0370e167454f1fb933a3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * EnergyMomentumTensor.cu * * Created on: Oct 22, 2015 * Author: bazow */ #include <math.h> // for math functions #include "edu/osu/rhic/trunk/hydro/EnergyMomentumTensor.cuh" #include "edu/osu/rhic/trunk/hydro/DynamicalVariables.cuh" #include "edu/osu/rhic/harness/lattice/LatticeParameters.h" #include "edu/osu/rhic/harness/init/CudaConfiguration.cuh" #include "edu/osu/rhic/trunk/eos/EquationOfState.cuh" #define MAX_ITERS 1000000 //const PRECISION ACC = 1e-2; __host__ __device__ PRECISION energyDensityFromConservedVariables(PRECISION ePrev, PRECISION M0, PRECISION M, PRECISION Pi) { #ifndef CONFORMAL_EOS PRECISION e0 = ePrev; // initial guess for energy density for(int j = 0; j < MAX_ITERS; ++j) { PRECISION p = equilibriumPressure(e0); PRECISION cs2 = speedOfSoundSquared(e0); PRECISION cst2 = p/e0; PRECISION A = fmaf(M0,1-cst2,Pi); PRECISION B = fmaf(M0,M0+Pi,-M); PRECISION H = sqrtf(fabsf(A*A+4*cst2*B)); PRECISION D = (A-H)/(2*cst2); PRECISION f = e0 + D; PRECISION fp = 1 - ((cs2 - cst2)*(B + D*H - ((cs2 - cst2)*cst2*D*M0)/e0))/(cst2*e0*H); PRECISION converg_factor = 0.9; PRECISION e; if (j < MAX_ITERS / 2) e = e0 - f/fp; else e = e0 - converg_factor * f/fp; if(fabsf(e - e0) <= 0.001 * fabsf(e)) return e; e0 = e; } // printf("Maximum number of iterations exceeded.\n"); printf("Maximum number of iterations exceeded.\tePrev=%.3f,\tM0=%.3f,\t M=%.3f,\t Pi=%.3f \n", ePrev, M0, M, Pi); return e0; #else return fabsf(sqrtf(fabsf(4 * M0 * M0 - 3 * M)) - M0); #endif } __host__ __device__ void getInferredVariables(PRECISION t, const PRECISION * const __restrict__ q, PRECISION ePrev, PRECISION * const __restrict__ e, PRECISION * const __restrict__ p, PRECISION * const __restrict__ ut, PRECISION * const __restrict__ ux, PRECISION * const __restrict__ uy, PRECISION * const __restrict__ un ) { PRECISION ttt = q[0]; PRECISION ttx = q[1]; PRECISION tty = q[2]; PRECISION ttn = q[3]; #ifdef PIMUNU PRECISION pitt = q[4]; PRECISION pitx = q[5]; PRECISION pity = q[6]; PRECISION pitn = q[7]; #else PRECISION pitt = 0; PRECISION pitx = 0; PRECISION pity = 0; PRECISION pitn = 0; #endif // \Pi #ifdef PI PRECISION Pi = q[14]; #else PRECISION Pi = 0; #endif PRECISION M0 = ttt - pitt; PRECISION M1 = ttx - pitx; PRECISION M2 = tty - pity; PRECISION M3 = ttn - pitn; PRECISION M = M1 * M1 + M2 * M2 + t * t * M3 * M3; #ifdef Pi if ((M0 * M0 - M + M0 * Pi) < 0) Pi = M / M0 - M0; #endif /****************************************************************************/ if (ePrev <= 0.1) { *e = M0 - M / M0; } else { *e = energyDensityFromConservedVariables(ePrev, M0, M, Pi); } if (isnan(*e)) { printf("\n e is nan. 
M0=%.3f,\t M1=%.3f,\t M2=%.3f,\t M3=%.3f,\t ttt=%.3f,\t ttx=%.3f,\t tty=%.3f,\t ttn=%.3f, \tpitt=%.3f,\t pitx=%.3f,\t pity=%.3f,\t pitn=%.3f\n", M0, M1, M2, M3, ttt, ttx, tty, ttn, pitt, pitx, pity, pitn); } *p = equilibriumPressure(*e); if (*e < 1.e-7) { *e = 1.e-7; *p = 1.e-7; } PRECISION P = *p + Pi; PRECISION E = 1/(*e + P); *ut = sqrtf(fabsf((M0 + P) * E)); PRECISION E2 = E/(*ut); *ux = M1 * E2; *uy = M2 * E2; *un = M3 * E2; } __global__ void setInferredVariablesKernel(const CONSERVED_VARIABLES * const __restrict__ q, PRECISION * const __restrict__ e, PRECISION * const __restrict__ p, FLUID_VELOCITY * const __restrict__ u, PRECISION t ) { unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < d_nElements) { unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M; unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M; unsigned int i = threadID % d_nx + N_GHOST_CELLS_M; unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy); PRECISION q_s[NUMBER_CONSERVED_VARIABLES]; q_s[0] = q->ttt[s]; q_s[1] = q->ttx[s]; q_s[2] = q->tty[s]; q_s[3] = q->ttn[s]; #ifdef PIMUNU q_s[4] = q->pitt[s]; q_s[5] = q->pitx[s]; q_s[6] = q->pity[s]; q_s[7] = q->pitn[s]; /****************************************************************************\ q_s[8] = q->pixx[s]; q_s[9] = q->pixy[s]; q_s[10] = q->pixn[s]; q_s[11] = q->piyy[s]; q_s[12] = q->piyn[s]; q_s[13] = q->pinn[s]; /****************************************************************************/ #endif #ifdef PI q_s[14] = q->Pi[s]; #endif PRECISION _e, _p, ut, ux, uy, un; getInferredVariables(t, q_s, e[s], &_e, &_p, &ut, &ux, &uy, &un); e[s] = _e; p[s] = _p; u->ut[s] = ut; u->ux[s] = ux; u->uy[s] = uy; u->un[s] = un; } } //=================================================================== // Components of T^{\mu\nu} in (\tau,x,y,\eta_s)-coordinates //=================================================================== __host__ __device__ PRECISION Ttt(PRECISION e, PRECISION p, PRECISION ut, PRECISION pitt) { return (e+p)*ut*ut-p+pitt; } __host__ __device__ PRECISION Ttx(PRECISION e, PRECISION p, PRECISION ut, PRECISION ux, PRECISION pitx) { return (e+p)*ut*ux+pitx; } __host__ __device__ PRECISION Tty(PRECISION e, PRECISION p, PRECISION ut, PRECISION uy, PRECISION pity) { return (e+p)*ut*uy+pity; } __host__ __device__ PRECISION Ttn(PRECISION e, PRECISION p, PRECISION ut, PRECISION un, PRECISION pitn) { return (e+p)*ut*un+pitn; } __host__ __device__ PRECISION Txx(PRECISION e, PRECISION p, PRECISION ux, PRECISION pixx) { return (e+p)*ux*ux+p+pixx; } __host__ __device__ PRECISION Txy(PRECISION e, PRECISION p, PRECISION ux, PRECISION uy, PRECISION pixy) { return (e+p)*ux*uy+pixy; } __host__ __device__ PRECISION Txn(PRECISION e, PRECISION p, PRECISION ux, PRECISION un, PRECISION pixn) { return (e+p)*ux*un+pixn; } __host__ __device__ PRECISION Tyy(PRECISION e, PRECISION p, PRECISION uy, PRECISION piyy) { return (e+p)*uy*uy+p+piyy; } __host__ __device__ PRECISION Tyn(PRECISION e, PRECISION p, PRECISION uy, PRECISION un, PRECISION piyn) { return (e+p)*uy*un+piyn; } __host__ __device__ PRECISION Tnn(PRECISION e, PRECISION p, PRECISION un, PRECISION pinn, PRECISION t) { return (e+p)*un*un+p/t/t+pinn; }
e4d25f6d3c86aeb57de0370e167454f1fb933a3f.cu
/* * EnergyMomentumTensor.cu * * Created on: Oct 22, 2015 * Author: bazow */ #include <math.h> // for math functions #include "edu/osu/rhic/trunk/hydro/EnergyMomentumTensor.cuh" #include "edu/osu/rhic/trunk/hydro/DynamicalVariables.cuh" #include "edu/osu/rhic/harness/lattice/LatticeParameters.h" #include "edu/osu/rhic/harness/init/CudaConfiguration.cuh" #include "edu/osu/rhic/trunk/eos/EquationOfState.cuh" #define MAX_ITERS 1000000 //const PRECISION ACC = 1e-2; __host__ __device__ PRECISION energyDensityFromConservedVariables(PRECISION ePrev, PRECISION M0, PRECISION M, PRECISION Pi) { #ifndef CONFORMAL_EOS PRECISION e0 = ePrev; // initial guess for energy density for(int j = 0; j < MAX_ITERS; ++j) { PRECISION p = equilibriumPressure(e0); PRECISION cs2 = speedOfSoundSquared(e0); PRECISION cst2 = p/e0; PRECISION A = fmaf(M0,1-cst2,Pi); PRECISION B = fmaf(M0,M0+Pi,-M); PRECISION H = sqrtf(fabsf(A*A+4*cst2*B)); PRECISION D = (A-H)/(2*cst2); PRECISION f = e0 + D; PRECISION fp = 1 - ((cs2 - cst2)*(B + D*H - ((cs2 - cst2)*cst2*D*M0)/e0))/(cst2*e0*H); PRECISION converg_factor = 0.9; PRECISION e; if (j < MAX_ITERS / 2) e = e0 - f/fp; else e = e0 - converg_factor * f/fp; if(fabsf(e - e0) <= 0.001 * fabsf(e)) return e; e0 = e; } // printf("Maximum number of iterations exceeded.\n"); printf("Maximum number of iterations exceeded.\tePrev=%.3f,\tM0=%.3f,\t M=%.3f,\t Pi=%.3f \n", ePrev, M0, M, Pi); return e0; #else return fabsf(sqrtf(fabsf(4 * M0 * M0 - 3 * M)) - M0); #endif } __host__ __device__ void getInferredVariables(PRECISION t, const PRECISION * const __restrict__ q, PRECISION ePrev, PRECISION * const __restrict__ e, PRECISION * const __restrict__ p, PRECISION * const __restrict__ ut, PRECISION * const __restrict__ ux, PRECISION * const __restrict__ uy, PRECISION * const __restrict__ un ) { PRECISION ttt = q[0]; PRECISION ttx = q[1]; PRECISION tty = q[2]; PRECISION ttn = q[3]; #ifdef PIMUNU PRECISION pitt = q[4]; PRECISION pitx = q[5]; PRECISION pity = q[6]; PRECISION pitn = q[7]; #else PRECISION pitt = 0; PRECISION pitx = 0; PRECISION pity = 0; PRECISION pitn = 0; #endif // \Pi #ifdef PI PRECISION Pi = q[14]; #else PRECISION Pi = 0; #endif PRECISION M0 = ttt - pitt; PRECISION M1 = ttx - pitx; PRECISION M2 = tty - pity; PRECISION M3 = ttn - pitn; PRECISION M = M1 * M1 + M2 * M2 + t * t * M3 * M3; #ifdef Pi if ((M0 * M0 - M + M0 * Pi) < 0) Pi = M / M0 - M0; #endif /****************************************************************************/ if (ePrev <= 0.1) { *e = M0 - M / M0; } else { *e = energyDensityFromConservedVariables(ePrev, M0, M, Pi); } if (isnan(*e)) { printf("\n e is nan. 
M0=%.3f,\t M1=%.3f,\t M2=%.3f,\t M3=%.3f,\t ttt=%.3f,\t ttx=%.3f,\t tty=%.3f,\t ttn=%.3f, \tpitt=%.3f,\t pitx=%.3f,\t pity=%.3f,\t pitn=%.3f\n", M0, M1, M2, M3, ttt, ttx, tty, ttn, pitt, pitx, pity, pitn); } *p = equilibriumPressure(*e); if (*e < 1.e-7) { *e = 1.e-7; *p = 1.e-7; } PRECISION P = *p + Pi; PRECISION E = 1/(*e + P); *ut = sqrtf(fabsf((M0 + P) * E)); PRECISION E2 = E/(*ut); *ux = M1 * E2; *uy = M2 * E2; *un = M3 * E2; } __global__ void setInferredVariablesKernel(const CONSERVED_VARIABLES * const __restrict__ q, PRECISION * const __restrict__ e, PRECISION * const __restrict__ p, FLUID_VELOCITY * const __restrict__ u, PRECISION t ) { unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < d_nElements) { unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M; unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M; unsigned int i = threadID % d_nx + N_GHOST_CELLS_M; unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy); PRECISION q_s[NUMBER_CONSERVED_VARIABLES]; q_s[0] = q->ttt[s]; q_s[1] = q->ttx[s]; q_s[2] = q->tty[s]; q_s[3] = q->ttn[s]; #ifdef PIMUNU q_s[4] = q->pitt[s]; q_s[5] = q->pitx[s]; q_s[6] = q->pity[s]; q_s[7] = q->pitn[s]; /****************************************************************************\ q_s[8] = q->pixx[s]; q_s[9] = q->pixy[s]; q_s[10] = q->pixn[s]; q_s[11] = q->piyy[s]; q_s[12] = q->piyn[s]; q_s[13] = q->pinn[s]; /****************************************************************************/ #endif #ifdef PI q_s[14] = q->Pi[s]; #endif PRECISION _e, _p, ut, ux, uy, un; getInferredVariables(t, q_s, e[s], &_e, &_p, &ut, &ux, &uy, &un); e[s] = _e; p[s] = _p; u->ut[s] = ut; u->ux[s] = ux; u->uy[s] = uy; u->un[s] = un; } } //=================================================================== // Components of T^{\mu\nu} in (\tau,x,y,\eta_s)-coordinates //=================================================================== __host__ __device__ PRECISION Ttt(PRECISION e, PRECISION p, PRECISION ut, PRECISION pitt) { return (e+p)*ut*ut-p+pitt; } __host__ __device__ PRECISION Ttx(PRECISION e, PRECISION p, PRECISION ut, PRECISION ux, PRECISION pitx) { return (e+p)*ut*ux+pitx; } __host__ __device__ PRECISION Tty(PRECISION e, PRECISION p, PRECISION ut, PRECISION uy, PRECISION pity) { return (e+p)*ut*uy+pity; } __host__ __device__ PRECISION Ttn(PRECISION e, PRECISION p, PRECISION ut, PRECISION un, PRECISION pitn) { return (e+p)*ut*un+pitn; } __host__ __device__ PRECISION Txx(PRECISION e, PRECISION p, PRECISION ux, PRECISION pixx) { return (e+p)*ux*ux+p+pixx; } __host__ __device__ PRECISION Txy(PRECISION e, PRECISION p, PRECISION ux, PRECISION uy, PRECISION pixy) { return (e+p)*ux*uy+pixy; } __host__ __device__ PRECISION Txn(PRECISION e, PRECISION p, PRECISION ux, PRECISION un, PRECISION pixn) { return (e+p)*ux*un+pixn; } __host__ __device__ PRECISION Tyy(PRECISION e, PRECISION p, PRECISION uy, PRECISION piyy) { return (e+p)*uy*uy+p+piyy; } __host__ __device__ PRECISION Tyn(PRECISION e, PRECISION p, PRECISION uy, PRECISION un, PRECISION piyn) { return (e+p)*uy*un+piyn; } __host__ __device__ PRECISION Tnn(PRECISION e, PRECISION p, PRECISION un, PRECISION pinn, PRECISION t) { return (e+p)*un*un+p/t/t+pinn; }
b886c260ca04460af42f3686ba0e1c800bd508f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cstring> #include <time.h> __global__ void mem_transf_test(int *input){ int gid = blockIdx.x * blockDim.x + threadIdx.x; printf("tid: %d, gid: %d, value: %d\n", threadIdx.x, gid, input[gid]); } int main(){ int size = 128; int byte_size = size * sizeof(int); int *h_input; h_input = (int *) malloc(byte_size); // randomly initialize variable time_t t; srand((unsigned) time(&t)); for(int i = 0; i < size; i++){ h_input[i] = (int) (rand() & 0xff); } int *d_input; hipMalloc((void **)&d_input, byte_size); hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice); dim3 block(64); dim3 grid(2); hipLaunchKernelGGL(( mem_transf_test) , dim3(grid), dim3(block), 0, 0, d_input); hipDeviceSynchronize(); hipFree(d_input); free(h_input); hipDeviceReset(); return 0; }
b886c260ca04460af42f3686ba0e1c800bd508f3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cstring> #include <time.h> __global__ void mem_transf_test(int *input){ int gid = blockIdx.x * blockDim.x + threadIdx.x; printf("tid: %d, gid: %d, value: %d\n", threadIdx.x, gid, input[gid]); } int main(){ int size = 128; int byte_size = size * sizeof(int); int *h_input; h_input = (int *) malloc(byte_size); // randomly initialize variable time_t t; srand((unsigned) time(&t)); for(int i = 0; i < size; i++){ h_input[i] = (int) (rand() & 0xff); } int *d_input; cudaMalloc((void **)&d_input, byte_size); cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice); dim3 block(64); dim3 grid(2); mem_transf_test <<<grid, block>>>(d_input); cudaDeviceSynchronize(); cudaFree(d_input); free(h_input); cudaDeviceReset(); return 0; }
8bf35a37d649eb5bfc59bfed822fe52d86e85926.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file DPDLJThermoDriverPotentialPairGPU.cu \brief Defines the driver functions for computing all types of pair forces on the GPU */ #include "EvaluatorPairDPDLJThermo.h" #include "AllDriverPotentialPairGPU.cuh" hipError_t gpu_compute_dpdljthermodpd_forces(const dpd_pair_args_t& args, const Scalar4 *d_params) { return gpu_compute_dpd_forces<EvaluatorPairDPDLJThermo>(args, d_params); } hipError_t gpu_compute_dpdljthermo_forces(const pair_args_t& args, const Scalar4 *d_params) { return gpu_compute_pair_forces<EvaluatorPairDPDLJThermo>(args, d_params); }
8bf35a37d649eb5bfc59bfed822fe52d86e85926.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. /*! \file DPDLJThermoDriverPotentialPairGPU.cu \brief Defines the driver functions for computing all types of pair forces on the GPU */ #include "EvaluatorPairDPDLJThermo.h" #include "AllDriverPotentialPairGPU.cuh" cudaError_t gpu_compute_dpdljthermodpd_forces(const dpd_pair_args_t& args, const Scalar4 *d_params) { return gpu_compute_dpd_forces<EvaluatorPairDPDLJThermo>(args, d_params); } cudaError_t gpu_compute_dpdljthermo_forces(const pair_args_t& args, const Scalar4 *d_params) { return gpu_compute_pair_forces<EvaluatorPairDPDLJThermo>(args, d_params); }
f439714a094aa537036c4cab483b3dc29336f8b3.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "deconv.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" int main(int argc, char *argv[]) { /* Input: M, N1, N2, epsilon method method - conv method M - number of randomly distributed points N1, N2 - output size epsilon - tolerance */ int ier = 0; if (argc < 4) { fprintf(stderr, "Usage: W Stacking\n" "Arguments:\n" " N1, N2 : image size.\n" " M: The number of randomly distributed points.\n" " epsilon: NUFFT tolerance (default 1e-6).\n" " kerevalmeth: Kernel evaluation method; one of\n" " 0: Exponential of square root (default), or\n" " 1: Horner evaluation.\n" " method: One of\n" " 0: nupts driven (default),\n" " 2: sub-problem, or\n"); return 1; } int N1, N2; PCS sigma = 2.0; // upsampling factor int M; double inp; sscanf(argv[1], "%d", &N1); sscanf(argv[2], "%d", &N2); sscanf(argv[3], "%d", &M); PCS epsilon = 1e-6; if(argc>4){ sscanf(argv[4], "%lf", &inp); epsilon = inp; } int kerevalmeth = 0; if(argc>5)sscanf(argv[5], "%d", &kerevalmeth); int method=0; if(argc>6)sscanf(argv[6], "%d", &method); //gpu_method == 0, nupts driven //int ier; PCS *u, *v; CPX *c; u = (PCS *)malloc(M * sizeof(PCS)); //Allocates page-locked memory on the host. v = (PCS *)malloc(M * sizeof(PCS)); PCS *d_u, *d_v; CUCPX *d_c, *d_fk; CUCPX *d_fw; checkCudaErrors(hipMalloc(&d_u, M * sizeof(PCS))); checkCudaErrors(hipMalloc(&d_v, M * sizeof(PCS))); checkCudaErrors(hipMalloc(&d_c, M * sizeof(CUCPX))); checkCudaErrors(hipMalloc(&d_fk, N1*N2*sizeof(CUCPX))); // generating data for (int i = 0; i < M; i++) { u[i] = randm11()*PI; //xxxxx printf("%lf ",u[i]); v[i] = randm11()*PI; // wgt[i] = 1; } printf("\n"); CPX *fk = (CPX*) malloc(sizeof(CPX)*N1*N2); for(int i=0; i<N1*N2; i++){ fk[i].real(1.0); fk[i].imag(1.0); } // double a[5] = {-PI/2, -PI/3, 0, PI/3, PI/2}; // change to random data // for(int i=0; i<M; i++){ // u[i] = a[i/5]; // v[i] = a[i%5]; // } #ifdef DEBUG printf("origial input data...\n"); for(int i=0; i<M; i++){ printf("%.3lf ",u[i]); } printf("\n"); for(int i=0; i<M; i++){ printf("%.3lf ",c[i].real()); } printf("\n"); #endif // ignore the tdirty // how to convert ms to vis //printf("generated data, x[1] %2.2g, y[1] %2.2g , z[1] %2.2g, c[1] %2.2g\n",x[1] , y[1], z[1], c[1].real()); // Timing begin //data transfer checkCudaErrors(hipMemcpy(d_u, u, M * sizeof(PCS), hipMemcpyHostToDevice)); //u checkCudaErrors(hipMemcpy(d_v, v, M * sizeof(PCS), hipMemcpyHostToDevice)); //v checkCudaErrors(hipMemcpy(d_fk, fk, N1 * N2 * sizeof(CUCPX), hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(d_c, 0, M * sizeof(CUCPX))); /* ----------Step2: plan setting------------*/ curafft_plan *plan; plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); int direction = 0; // opts and copts setting plan->opts.gpu_device_id = 0; plan->opts.upsampfac = sigma; plan->opts.gpu_sort = 1; plan->opts.gpu_binsizex = -1; plan->opts.gpu_binsizey = -1; plan->opts.gpu_binsizez = -1; plan->opts.gpu_kerevalmeth = kerevalmeth; plan->opts.gpu_conv_only = 0; plan->opts.gpu_gridder_method = method; plan->type = 2; ier = setup_conv_opts(plan->copts, epsilon, sigma, 1, direction, kerevalmeth); //check the arguements if(ier!=0)printf("setup_error\n"); // plan setting // cuda stream malloc in setup_plan int nf1 = 
get_num_cells(N1,plan->copts); int nf2 = get_num_cells(N2,plan->copts); plan->dim = 2; setup_plan(nf1, nf2, 1, M, d_u, d_v, NULL, d_c, plan); plan->ms = N1; plan->mt = N2; plan->mu = 1; plan->execute_flow = 1; int iflag = -1; int fftsign = (iflag>=0) ? 1 : -1; plan->iflag = fftsign; //may be useless| conflict with direction plan->batchsize = 1; plan->copts.direction = direction; // related to type // // fw allocation // checkCudaErrors(hipMalloc((void**)&plan->fw,sizeof(CUCPX)*nf1*nf2*nf3)); // PCS *fwkerhalf1 = (PCS*)malloc(sizeof(PCS)*(plan->nf1/2+1)); // onedim_fseries_kernel_seq(plan->nf1, fwkerhalf1, plan->copts); // used for correction // PCS *fwkerhalf2 = (PCS*)malloc(sizeof(PCS)*(plan->nf2/2+1)); // onedim_fseries_kernel_seq(plan->nf2, fwkerhalf2, plan->copts); fourier_series_appro_invoker(plan->fwkerhalf1, plan->copts, plan->nf1/2+1); printf("correction factor printing...\n"); PCS *corr = (PCS*) malloc(sizeof(PCS)*(plan->nf1/2+1)); checkCudaErrors(hipMemcpy(corr,plan->fwkerhalf1,sizeof(PCS)*(plan->nf1/2+1),hipMemcpyDeviceToHost)); for(int i=0; i<10; i++){ printf("%.3lf ",corr[i]); } printf("\n"); fourier_series_appro_invoker(plan->fwkerhalf2, plan->copts, plan->nf2/2+1); #ifdef DEBUG printf("nf1, nf2 %d %d\n",plan->nf1,plan->nf2); printf("copts info printing...\n"); printf("kw: %d, direction: %d, pirange: %d, upsampfac: %lf, \nbeta: %lf, halfwidth: %lf, c: %lf\n", plan->copts.kw, plan->copts.direction, plan->copts.pirange, plan->copts.upsampfac, plan->copts.ES_beta, plan->copts.ES_halfwidth, plan->copts.ES_c); PCS *fwkerhalf1 = (PCS*)malloc(sizeof(PCS)*(plan->nf1/2+1)); PCS *fwkerhalf2 = (PCS*)malloc(sizeof(PCS)*(plan->nf2/2+1)); checkCudaErrors(hipMemcpy(fwkerhalf1,plan->fwkerhalf1,(plan->nf1/2+1)* sizeof(PCS),hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(fwkerhalf2,plan->fwkerhalf2,(plan->nf2/2+1)* sizeof(PCS),hipMemcpyDeviceToHost)); printf("correction factor print...\n"); for(int i=0; i<nf1/2+1; i++){ printf("%.3g ", fwkerhalf1[i]); } printf("\n"); for(int i=0; i<nf2/2+1; i++){ printf("%.3g ", fwkerhalf2[i]); } printf("\n"); // free host fwkerhalf free(fwkerhalf1); free(fwkerhalf2); #endif // // copy to device // checkCudaErrors(hipMemcpy(plan->fwkerhalf1,fwkerhalf1,(plan->nf1/2+1)* // sizeof(PCS),hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(plan->fwkerhalf2,fwkerhalf2,(plan->nf2/2+1)* // sizeof(PCS),hipMemcpyHostToDevice)); // cufft plan setting hipfftHandle fftplan; int n[] = {plan->nf2, plan->nf1}; int inembed[] = {plan->nf2, plan->nf1}; int onembed[] = {plan->nf2, plan->nf1}; // hipfftCreate(&fftplan); // hipfftPlan2d(&fftplan,n[0],n[1],CUFFT_TYPE); // the bach size sets as the num of w when memory is sufficent. Alternative way, set as a smaller number when memory is insufficient. 
// and handle this piece by piece hipfftPlanMany(&fftplan,2,n,inembed,1,inembed[0]*inembed[1], onembed,1,onembed[0]*onembed[1],CUFFT_TYPE,plan->nf3); //need to check and revise (the partial conv will be differnt) plan->fftplan = fftplan; // set up bin size +++ (for other methods) and related malloc based on gpu method // assign memory for index after sorting (can be done in setup_plan) // bin sorting (for other methods) if(ier == 1){ printf("errors in gridder setting\n"); return ier; } // fw (conv res set) checkCudaErrors(hipMalloc((void**)&d_fw,sizeof(CUCPX)*nf1*nf2)); checkCudaErrors(hipMemset(d_fw, 0, sizeof(CUCPX)*nf1*nf2)); plan->fw = d_fw; // fk malloc and set // checkCudaErrors(hipMalloc((void**)&d_fk,sizeof(CUCPX)*N1*N2)); plan->fk = d_fk; // calulating result curafft_deconv(plan); printf("deconv result printing...\n"); CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); hipMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,hipMemcpyDeviceToHost); for(int i=0; i<nf2; i++){ for(int j=0; j<nf1; j++){ printf("%.3g ",fw[i*nf1+j].real()); } printf("\n"); } #ifdef DEBUG printf("conv result printing...\n"); CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); PCS temp_res=0; hipMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,hipMemcpyDeviceToHost); for(int i=0; i<nf2; i++){ for(int j=0; j<nf1; j++){ printf("%.3g ",fw[i*nf1+j].real()); temp_res += fw[i*nf1+j].real(); } printf("\n"); } printf("fft(0,0) %.3g\n",temp_res); #endif // fft CUFFT_EXEC(plan->fftplan, plan->fw, plan->fw, plan->iflag); // #ifdef DEBUG printf("fft result printing...\n"); // CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); hipMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,hipMemcpyDeviceToHost); for(int i=0; i<nf2; i++){ for(int j=0; j<nf1; j++){ printf("%.3g ",fw[i*nf1+j].real()); } printf("\n"); } free(fw); // #endif // printf("correction factor printing...\n"); // for(int i=0; i<N1/2; i++){ // printf("%.3g ",fwkerhalf1[i]); // } // printf("\n"); // for(int i=0; i<N2/2; i++){ // printf("%.3g ",fwkerhalf2[i]); // } // printf("\n"); // deconv ier = curafft_interp(plan); c = (CPX *)malloc(M * sizeof(CPX)); checkCudaErrors(hipMemcpy(c,plan->d_c,sizeof(CUCPX)*M, hipMemcpyDeviceToHost)); // result printing printf("final result printing...\n"); for(int i=0; i<M; i++){ printf("%.10lf ",c[i].real()); } printf("ground truth printing...\n"); CPX *truth = (CPX *) malloc(sizeof(CPX)*M); CPX Ft = CPX(0,0), J = IMA*(PCS)iflag; for (int k=0; k<M; ++k){ for(int i=0; i<N2; i++){ for(int j=0; j<N1; j++) Ft += fk[i*N1+j]*exp(J*((j-N1/2)*u[k]+(i-N2/2)*v[k])); } printf("%lf ",Ft); truth[k] = Ft; Ft.real(0); Ft.imag(0); } printf("\n"); double max=0; double l2_max=0; double fk_max = 0; for(int i=0; i<M; i++){ if(abs(c[i].real())>fk_max)fk_max = abs(c[i].real()); } printf("fk max %lf\n",fk_max); for(int i=0; i<M; i++){ double temp = abs(truth[i].real()-c[i].real()); if(temp>max) max = temp; if(temp/fk_max > l2_max) l2_max = temp/fk_max; } printf("maximal abs error %.5g, maximal l2 error %.5g\n",max,l2_max); //free curafft_free(plan); free(fk); free(u); free(v); free(c); return ier; }
f439714a094aa537036c4cab483b3dc29336f8b3.cu
#include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "deconv.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" int main(int argc, char *argv[]) { /* Input: M, N1, N2, epsilon method method - conv method M - number of randomly distributed points N1, N2 - output size epsilon - tolerance */ int ier = 0; if (argc < 4) { fprintf(stderr, "Usage: W Stacking\n" "Arguments:\n" " N1, N2 : image size.\n" " M: The number of randomly distributed points.\n" " epsilon: NUFFT tolerance (default 1e-6).\n" " kerevalmeth: Kernel evaluation method; one of\n" " 0: Exponential of square root (default), or\n" " 1: Horner evaluation.\n" " method: One of\n" " 0: nupts driven (default),\n" " 2: sub-problem, or\n"); return 1; } int N1, N2; PCS sigma = 2.0; // upsampling factor int M; double inp; sscanf(argv[1], "%d", &N1); sscanf(argv[2], "%d", &N2); sscanf(argv[3], "%d", &M); PCS epsilon = 1e-6; if(argc>4){ sscanf(argv[4], "%lf", &inp); epsilon = inp; } int kerevalmeth = 0; if(argc>5)sscanf(argv[5], "%d", &kerevalmeth); int method=0; if(argc>6)sscanf(argv[6], "%d", &method); //gpu_method == 0, nupts driven //int ier; PCS *u, *v; CPX *c; u = (PCS *)malloc(M * sizeof(PCS)); //Allocates page-locked memory on the host. v = (PCS *)malloc(M * sizeof(PCS)); PCS *d_u, *d_v; CUCPX *d_c, *d_fk; CUCPX *d_fw; checkCudaErrors(cudaMalloc(&d_u, M * sizeof(PCS))); checkCudaErrors(cudaMalloc(&d_v, M * sizeof(PCS))); checkCudaErrors(cudaMalloc(&d_c, M * sizeof(CUCPX))); checkCudaErrors(cudaMalloc(&d_fk, N1*N2*sizeof(CUCPX))); // generating data for (int i = 0; i < M; i++) { u[i] = randm11()*PI; //xxxxx printf("%lf ",u[i]); v[i] = randm11()*PI; // wgt[i] = 1; } printf("\n"); CPX *fk = (CPX*) malloc(sizeof(CPX)*N1*N2); for(int i=0; i<N1*N2; i++){ fk[i].real(1.0); fk[i].imag(1.0); } // double a[5] = {-PI/2, -PI/3, 0, PI/3, PI/2}; // change to random data // for(int i=0; i<M; i++){ // u[i] = a[i/5]; // v[i] = a[i%5]; // } #ifdef DEBUG printf("origial input data...\n"); for(int i=0; i<M; i++){ printf("%.3lf ",u[i]); } printf("\n"); for(int i=0; i<M; i++){ printf("%.3lf ",c[i].real()); } printf("\n"); #endif // ignore the tdirty // how to convert ms to vis //printf("generated data, x[1] %2.2g, y[1] %2.2g , z[1] %2.2g, c[1] %2.2g\n",x[1] , y[1], z[1], c[1].real()); // Timing begin //data transfer checkCudaErrors(cudaMemcpy(d_u, u, M * sizeof(PCS), cudaMemcpyHostToDevice)); //u checkCudaErrors(cudaMemcpy(d_v, v, M * sizeof(PCS), cudaMemcpyHostToDevice)); //v checkCudaErrors(cudaMemcpy(d_fk, fk, N1 * N2 * sizeof(CUCPX), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(d_c, 0, M * sizeof(CUCPX))); /* ----------Step2: plan setting------------*/ curafft_plan *plan; plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); int direction = 0; // opts and copts setting plan->opts.gpu_device_id = 0; plan->opts.upsampfac = sigma; plan->opts.gpu_sort = 1; plan->opts.gpu_binsizex = -1; plan->opts.gpu_binsizey = -1; plan->opts.gpu_binsizez = -1; plan->opts.gpu_kerevalmeth = kerevalmeth; plan->opts.gpu_conv_only = 0; plan->opts.gpu_gridder_method = method; plan->type = 2; ier = setup_conv_opts(plan->copts, epsilon, sigma, 1, direction, kerevalmeth); //check the arguements if(ier!=0)printf("setup_error\n"); // plan setting // cuda stream malloc in setup_plan int nf1 = get_num_cells(N1,plan->copts); int nf2 = 
get_num_cells(N2,plan->copts); plan->dim = 2; setup_plan(nf1, nf2, 1, M, d_u, d_v, NULL, d_c, plan); plan->ms = N1; plan->mt = N2; plan->mu = 1; plan->execute_flow = 1; int iflag = -1; int fftsign = (iflag>=0) ? 1 : -1; plan->iflag = fftsign; //may be useless| conflict with direction plan->batchsize = 1; plan->copts.direction = direction; // related to type // // fw allocation // checkCudaErrors(cudaMalloc((void**)&plan->fw,sizeof(CUCPX)*nf1*nf2*nf3)); // PCS *fwkerhalf1 = (PCS*)malloc(sizeof(PCS)*(plan->nf1/2+1)); // onedim_fseries_kernel_seq(plan->nf1, fwkerhalf1, plan->copts); // used for correction // PCS *fwkerhalf2 = (PCS*)malloc(sizeof(PCS)*(plan->nf2/2+1)); // onedim_fseries_kernel_seq(plan->nf2, fwkerhalf2, plan->copts); fourier_series_appro_invoker(plan->fwkerhalf1, plan->copts, plan->nf1/2+1); printf("correction factor printing...\n"); PCS *corr = (PCS*) malloc(sizeof(PCS)*(plan->nf1/2+1)); checkCudaErrors(cudaMemcpy(corr,plan->fwkerhalf1,sizeof(PCS)*(plan->nf1/2+1),cudaMemcpyDeviceToHost)); for(int i=0; i<10; i++){ printf("%.3lf ",corr[i]); } printf("\n"); fourier_series_appro_invoker(plan->fwkerhalf2, plan->copts, plan->nf2/2+1); #ifdef DEBUG printf("nf1, nf2 %d %d\n",plan->nf1,plan->nf2); printf("copts info printing...\n"); printf("kw: %d, direction: %d, pirange: %d, upsampfac: %lf, \nbeta: %lf, halfwidth: %lf, c: %lf\n", plan->copts.kw, plan->copts.direction, plan->copts.pirange, plan->copts.upsampfac, plan->copts.ES_beta, plan->copts.ES_halfwidth, plan->copts.ES_c); PCS *fwkerhalf1 = (PCS*)malloc(sizeof(PCS)*(plan->nf1/2+1)); PCS *fwkerhalf2 = (PCS*)malloc(sizeof(PCS)*(plan->nf2/2+1)); checkCudaErrors(cudaMemcpy(fwkerhalf1,plan->fwkerhalf1,(plan->nf1/2+1)* sizeof(PCS),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(fwkerhalf2,plan->fwkerhalf2,(plan->nf2/2+1)* sizeof(PCS),cudaMemcpyDeviceToHost)); printf("correction factor print...\n"); for(int i=0; i<nf1/2+1; i++){ printf("%.3g ", fwkerhalf1[i]); } printf("\n"); for(int i=0; i<nf2/2+1; i++){ printf("%.3g ", fwkerhalf2[i]); } printf("\n"); // free host fwkerhalf free(fwkerhalf1); free(fwkerhalf2); #endif // // copy to device // checkCudaErrors(cudaMemcpy(plan->fwkerhalf1,fwkerhalf1,(plan->nf1/2+1)* // sizeof(PCS),cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(plan->fwkerhalf2,fwkerhalf2,(plan->nf2/2+1)* // sizeof(PCS),cudaMemcpyHostToDevice)); // cufft plan setting cufftHandle fftplan; int n[] = {plan->nf2, plan->nf1}; int inembed[] = {plan->nf2, plan->nf1}; int onembed[] = {plan->nf2, plan->nf1}; // cufftCreate(&fftplan); // cufftPlan2d(&fftplan,n[0],n[1],CUFFT_TYPE); // the bach size sets as the num of w when memory is sufficent. Alternative way, set as a smaller number when memory is insufficient. 
// and handle this piece by piece cufftPlanMany(&fftplan,2,n,inembed,1,inembed[0]*inembed[1], onembed,1,onembed[0]*onembed[1],CUFFT_TYPE,plan->nf3); //need to check and revise (the partial conv will be differnt) plan->fftplan = fftplan; // set up bin size +++ (for other methods) and related malloc based on gpu method // assign memory for index after sorting (can be done in setup_plan) // bin sorting (for other methods) if(ier == 1){ printf("errors in gridder setting\n"); return ier; } // fw (conv res set) checkCudaErrors(cudaMalloc((void**)&d_fw,sizeof(CUCPX)*nf1*nf2)); checkCudaErrors(cudaMemset(d_fw, 0, sizeof(CUCPX)*nf1*nf2)); plan->fw = d_fw; // fk malloc and set // checkCudaErrors(cudaMalloc((void**)&d_fk,sizeof(CUCPX)*N1*N2)); plan->fk = d_fk; // calulating result curafft_deconv(plan); printf("deconv result printing...\n"); CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,cudaMemcpyDeviceToHost); for(int i=0; i<nf2; i++){ for(int j=0; j<nf1; j++){ printf("%.3g ",fw[i*nf1+j].real()); } printf("\n"); } #ifdef DEBUG printf("conv result printing...\n"); CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); PCS temp_res=0; cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,cudaMemcpyDeviceToHost); for(int i=0; i<nf2; i++){ for(int j=0; j<nf1; j++){ printf("%.3g ",fw[i*nf1+j].real()); temp_res += fw[i*nf1+j].real(); } printf("\n"); } printf("fft(0,0) %.3g\n",temp_res); #endif // fft CUFFT_EXEC(plan->fftplan, plan->fw, plan->fw, plan->iflag); // #ifdef DEBUG printf("fft result printing...\n"); // CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,cudaMemcpyDeviceToHost); for(int i=0; i<nf2; i++){ for(int j=0; j<nf1; j++){ printf("%.3g ",fw[i*nf1+j].real()); } printf("\n"); } free(fw); // #endif // printf("correction factor printing...\n"); // for(int i=0; i<N1/2; i++){ // printf("%.3g ",fwkerhalf1[i]); // } // printf("\n"); // for(int i=0; i<N2/2; i++){ // printf("%.3g ",fwkerhalf2[i]); // } // printf("\n"); // deconv ier = curafft_interp(plan); c = (CPX *)malloc(M * sizeof(CPX)); checkCudaErrors(cudaMemcpy(c,plan->d_c,sizeof(CUCPX)*M, cudaMemcpyDeviceToHost)); // result printing printf("final result printing...\n"); for(int i=0; i<M; i++){ printf("%.10lf ",c[i].real()); } printf("ground truth printing...\n"); CPX *truth = (CPX *) malloc(sizeof(CPX)*M); CPX Ft = CPX(0,0), J = IMA*(PCS)iflag; for (int k=0; k<M; ++k){ for(int i=0; i<N2; i++){ for(int j=0; j<N1; j++) Ft += fk[i*N1+j]*exp(J*((j-N1/2)*u[k]+(i-N2/2)*v[k])); } printf("%lf ",Ft); truth[k] = Ft; Ft.real(0); Ft.imag(0); } printf("\n"); double max=0; double l2_max=0; double fk_max = 0; for(int i=0; i<M; i++){ if(abs(c[i].real())>fk_max)fk_max = abs(c[i].real()); } printf("fk max %lf\n",fk_max); for(int i=0; i<M; i++){ double temp = abs(truth[i].real()-c[i].real()); if(temp>max) max = temp; if(temp/fk_max > l2_max) l2_max = temp/fk_max; } printf("maximal abs error %.5g, maximal l2 error %.5g\n",max,l2_max); //free curafft_free(plan); free(fk); free(u); free(v); free(c); return ier; }
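The batched FFT setup in the pair above funnels through cufftPlanMany, whose long positional argument list is easy to misread. A minimal annotated sketch of the same call pattern (CUDA/cuFFT; nf1, nf2 and nbatch are stand-ins for the plan fields used above, and the transform type is assumed to be single-precision C2C, which the test hides behind its CUFFT_TYPE macro):

#include <cufft.h>

// Build a batched 2-D complex-to-complex plan over tightly packed nf2 x nf1 slabs,
// mirroring the cufftPlanMany call in the test above.
cufftHandle make_batched_c2c_plan(int nf1, int nf2, int nbatch)
{
    cufftHandle plan;
    int n[2]       = {nf2, nf1};  // transform sizes, slowest-varying dimension first
    int inembed[2] = {nf2, nf1};  // input storage dimensions (no padding)
    int onembed[2] = {nf2, nf1};  // output storage dimensions (no padding)
    cufftPlanMany(&plan,
                  2, n,                   // rank, sizes
                  inembed, 1, nf1 * nf2,  // input embed, stride, distance between batches
                  onembed, 1, nf1 * nf2,  // output embed, stride, distance between batches
                  CUFFT_C2C,              // assumed transform type
                  nbatch);                // number of transforms in the batch
    return plan;
}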
2072de4d2bc191716f701ff4509601cdfb539edd.hip
// !!! This is a file automatically generated by hipify!!! /*! * @file YuriConvertCuda.cu * @author Zdenek Travnicek * @date 13.8.2010 * @date 16.2.2013 * @copyright Institute of Intermedia, CTU in Prague, 2010 - 2013 * Distributed under modified BSD Licence, details in file doc/LICENSE * */ #include <hip/hip_runtime.h> //include "yuri/video/YuriConvertor.h" __device__ void _RGB2YUV(char*s, float *Y, float *Cb, float*Cr, float Wb, float Wr, float Wg, float Kb, float Kr) { float r,g,b; r = (float)((unsigned char)(s[0]))/255.0f; g = (float)((unsigned char)(s[1]))/255.0f; b = (float)((unsigned char)(s[2]))/255.0f; *Y=r*Wr + g*Wg + b*Wb; *Cb=(b-*Y)*Kb; *Cr=(r-*Y)*Kr; } __device__ unsigned int luma(float Y) { return (unsigned int)(64 + Y * 876); } __device__ unsigned int chroma(float Y) { return (unsigned int)(512 + Y * 896); } __global__ void RGB2YUV(char *s, char *d, size_t num, float Wb, float Wr, float Wg, float Kb, float Kr) { int idx = blockIdx.x * blockDim.x + threadIdx.x; //if (2*idx>=num) return; int si = 6*idx, di=5*idx; float Y1, Cr1, Cb1, Y2, Cb2, Cr2; unsigned int Ya, Yb, Cb, Cr; for (int i=idx;i<num;i+=4096) { si = i * 6; di = i * 5; _RGB2YUV(s+si+0, &Y1, &Cb1, &Cr1, Wb, Wr, Wg, Kb, Kr); _RGB2YUV(s+si+3, &Y2, &Cb2, &Cr2, Wb, Wr, Wg, Kb, Kr); Ya = luma(Y1); Yb = luma(Y2); Cr= chroma((Cr1+Cr2)/2.0f); Cb= chroma((Cb1+Cb2)/2.0f); d[di]=Ya&0xFF; d[di+1]=((Ya>>8)&0x03) | ((Cr<<2)&0xfc); d[di+2]=((Cr>>6)&0x0F) | ((Yb<<4)&0xF0); d[di+3]=((Yb>>4)&0x3F) | ((Cb<<6)&0xC0); d[di+4]=((Cb>>2)&0xFF); } } __device__ unsigned int chr(float v) { float val = v * 255.0f; return (unsigned int)(val>255?255:(val<0?0:val)); } __global__ void YUV162RGB(char *s, char *d, size_t num, float Wb, float Wr, float Wg, float Kb, float Kr) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2*idx>=num) return; int si = 6*idx, di=5*idx; float Y1, Y2, U, V; for (int i=idx;i<num/2;i+=4096) { si = i * 4; di = i * 6; Y1 = (float)((unsigned char)(s[si+0]))/255.0f; Y2 = (float)((unsigned char)(s[si+2]))/255.0f; U = (float)((unsigned char)(s[si+1]))/255.0f - 0.5f; V = (float)((unsigned char)(s[si+3]))/255.0f - 0.5f; d[di+0] = chr(Y1 + 1.403*V); d[di+1] = chr(Y1 - 0.344*U - 0.714*V); d[di+2] = chr(Y1 + 1.770*U); d[di+3] = chr(Y2 + 1.403*V); d[di+4] = chr(Y2 - 0.344*U - 0.714*V); d[di+5] = chr(Y2 + 1.770*U); } } void *CudaAlloc(unsigned int size) { void *x; hipMalloc((void **) &x, size*sizeof(char)); return x; } void CudaDealloc(void *mem) { hipFree(mem); } bool YuriConvertRGB24_YUV20(const char *src, char *dest, void *src_cuda, void *dest_cuda,unsigned int num, float Wb, float Wr) { size_t size = num*3, out_size=num*5/2; hipMemcpy(reinterpret_cast<char*>(src_cuda),reinterpret_cast<const char*>(src), size,hipMemcpyHostToDevice); float Wg, Kb, Kr; Wg=1.0f-Wr-Wb; Kb = 0.5f / (1.0f - Wb); Kr = 0.5f / (1.0f - Wr); // for (int i=0;i<1000;++i) { hipLaunchKernelGGL(( RGB2YUV) , dim3(512), dim3(8) , 0, 0, reinterpret_cast<char*>(src_cuda), reinterpret_cast<char*>(dest_cuda), num, Wb,Wr,Wg,Kb,Kr); // } hipMemcpy(reinterpret_cast<char*>(dest),reinterpret_cast<char*>(dest_cuda), out_size,hipMemcpyDeviceToHost); return true; } bool YuriConvertYUV16_RGB24(const char *src, char *dest, void *src_cuda, void *dest_cuda,unsigned int num, float Wb, float Wr) { size_t size = num*2, out_size=num*3; hipMemcpy(src_cuda,src,size,hipMemcpyHostToDevice); float Wg, Kb, Kr; Wg=1.0f-Wr-Wb; Kb = 0.5f / (1.0f - Wb); Kr = 0.5f / (1.0f - Wr); hipLaunchKernelGGL(( YUV162RGB) , dim3(512), dim3(8) , 0, 0, reinterpret_cast<char*>(src_cuda), 
reinterpret_cast<char*>(dest_cuda), num, Wb,Wr,Wg,Kb,Kr); hipMemcpy(dest,dest_cuda,out_size,hipMemcpyDeviceToHost); return true; }
2072de4d2bc191716f701ff4509601cdfb539edd.cu
/*! * @file YuriConvertCuda.cu * @author Zdenek Travnicek * @date 13.8.2010 * @date 16.2.2013 * @copyright Institute of Intermedia, CTU in Prague, 2010 - 2013 * Distributed under modified BSD Licence, details in file doc/LICENSE * */ #include <cuda.h> //include "yuri/video/YuriConvertor.h" __device__ void _RGB2YUV(char*s, float *Y, float *Cb, float*Cr, float Wb, float Wr, float Wg, float Kb, float Kr) { float r,g,b; r = (float)((unsigned char)(s[0]))/255.0f; g = (float)((unsigned char)(s[1]))/255.0f; b = (float)((unsigned char)(s[2]))/255.0f; *Y=r*Wr + g*Wg + b*Wb; *Cb=(b-*Y)*Kb; *Cr=(r-*Y)*Kr; } __device__ unsigned int luma(float Y) { return (unsigned int)(64 + Y * 876); } __device__ unsigned int chroma(float Y) { return (unsigned int)(512 + Y * 896); } __global__ void RGB2YUV(char *s, char *d, size_t num, float Wb, float Wr, float Wg, float Kb, float Kr) { int idx = blockIdx.x * blockDim.x + threadIdx.x; //if (2*idx>=num) return; int si = 6*idx, di=5*idx; float Y1, Cr1, Cb1, Y2, Cb2, Cr2; unsigned int Ya, Yb, Cb, Cr; for (int i=idx;i<num;i+=4096) { si = i * 6; di = i * 5; _RGB2YUV(s+si+0, &Y1, &Cb1, &Cr1, Wb, Wr, Wg, Kb, Kr); _RGB2YUV(s+si+3, &Y2, &Cb2, &Cr2, Wb, Wr, Wg, Kb, Kr); Ya = luma(Y1); Yb = luma(Y2); Cr= chroma((Cr1+Cr2)/2.0f); Cb= chroma((Cb1+Cb2)/2.0f); d[di]=Ya&0xFF; d[di+1]=((Ya>>8)&0x03) | ((Cr<<2)&0xfc); d[di+2]=((Cr>>6)&0x0F) | ((Yb<<4)&0xF0); d[di+3]=((Yb>>4)&0x3F) | ((Cb<<6)&0xC0); d[di+4]=((Cb>>2)&0xFF); } } __device__ unsigned int chr(float v) { float val = v * 255.0f; return (unsigned int)(val>255?255:(val<0?0:val)); } __global__ void YUV162RGB(char *s, char *d, size_t num, float Wb, float Wr, float Wg, float Kb, float Kr) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (2*idx>=num) return; int si = 6*idx, di=5*idx; float Y1, Y2, U, V; for (int i=idx;i<num/2;i+=4096) { si = i * 4; di = i * 6; Y1 = (float)((unsigned char)(s[si+0]))/255.0f; Y2 = (float)((unsigned char)(s[si+2]))/255.0f; U = (float)((unsigned char)(s[si+1]))/255.0f - 0.5f; V = (float)((unsigned char)(s[si+3]))/255.0f - 0.5f; d[di+0] = chr(Y1 + 1.403*V); d[di+1] = chr(Y1 - 0.344*U - 0.714*V); d[di+2] = chr(Y1 + 1.770*U); d[di+3] = chr(Y2 + 1.403*V); d[di+4] = chr(Y2 - 0.344*U - 0.714*V); d[di+5] = chr(Y2 + 1.770*U); } } void *CudaAlloc(unsigned int size) { void *x; cudaMalloc((void **) &x, size*sizeof(char)); return x; } void CudaDealloc(void *mem) { cudaFree(mem); } bool YuriConvertRGB24_YUV20(const char *src, char *dest, void *src_cuda, void *dest_cuda,unsigned int num, float Wb, float Wr) { size_t size = num*3, out_size=num*5/2; cudaMemcpy(reinterpret_cast<char*>(src_cuda),reinterpret_cast<const char*>(src), size,cudaMemcpyHostToDevice); float Wg, Kb, Kr; Wg=1.0f-Wr-Wb; Kb = 0.5f / (1.0f - Wb); Kr = 0.5f / (1.0f - Wr); // for (int i=0;i<1000;++i) { RGB2YUV <<<512, 8 >>> (reinterpret_cast<char*>(src_cuda), reinterpret_cast<char*>(dest_cuda), num, Wb,Wr,Wg,Kb,Kr); // } cudaMemcpy(reinterpret_cast<char*>(dest),reinterpret_cast<char*>(dest_cuda), out_size,cudaMemcpyDeviceToHost); return true; } bool YuriConvertYUV16_RGB24(const char *src, char *dest, void *src_cuda, void *dest_cuda,unsigned int num, float Wb, float Wr) { size_t size = num*2, out_size=num*3; cudaMemcpy(src_cuda,src,size,cudaMemcpyHostToDevice); float Wg, Kb, Kr; Wg=1.0f-Wr-Wb; Kb = 0.5f / (1.0f - Wb); Kr = 0.5f / (1.0f - Wr); YUV162RGB <<<512, 8 >>> (reinterpret_cast<char*>(src_cuda), reinterpret_cast<char*>(dest_cuda), num, Wb,Wr,Wg,Kb,Kr); cudaMemcpy(dest,dest_cuda,out_size,cudaMemcpyDeviceToHost); return true; }
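RGB2YUV in the pair above packs two pixels' 10-bit Y/Cb/Cr samples into five bytes (Ya, Cr, Yb, Cb; 40 bits total, filled LSB-first), with luma() mapping Y in [0,1] to the 10-bit range 64..940 and chroma() mapping Cb/Cr around a 512 midpoint. A small hypothetical helper (not part of the original file) that inverts the packing, written only to document the byte layout the kernel produces:

// Unpack one 5-byte group written by RGB2YUV back into its four 10-bit samples.
static inline void unpack_yuv20_pair(const unsigned char d[5],
                                     unsigned *Ya, unsigned *Cr,
                                     unsigned *Yb, unsigned *Cb)
{
    *Ya = d[0]                  | ((d[1] & 0x03u) << 8);   // d[0], low 2 bits of d[1]
    *Cr = ((d[1] >> 2) & 0x3Fu) | ((d[2] & 0x0Fu) << 6);   // high 6 bits of d[1], low 4 of d[2]
    *Yb = ((d[2] >> 4) & 0x0Fu) | ((d[3] & 0x3Fu) << 4);   // high 4 bits of d[2], low 6 of d[3]
    *Cb = ((d[3] >> 6) & 0x03u) | ((unsigned)d[4] << 2);   // high 2 bits of d[3], all of d[4]
}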
5315df15ebccade085771397100d11498aaa341e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Prerequisites.cuh" #include "Generics.cuh" namespace gtom { #define MonoTpB 192 //////////////////////////// //CUDA kernel declarations// //////////////////////////// template <class T, uint blockSize, bool nIsPow2> __global__ void SumKernel(T* d_input, T* d_output, size_t n); template <class T> __global__ void SumMonolithicKernel(T* d_input, T* d_output, int n); template <class T> __global__ void SumMonolithicMaskedKernel(T* d_input, T* d_output, tfloat* d_mask, int n); /////// //Sum// /////// void GetNumBlocksAndThreads(size_t n, int &blocks, int &threads, int maxblocks) { //get device capability, to avoid block/grid size excceed the upbound hipDeviceProp_t prop; int device; hipGetDevice(&device); hipGetDeviceProperties(&prop, device); size_t maxthreads = 512; threads = (int)((n < maxthreads * 2) ? NextPow2((n + 1) / 2) : maxthreads); size_t totalblocks = (n + (threads * 2 - 1)) / (threads * 2); totalblocks = tmin((size_t)maxblocks, totalblocks); blocks = (int)totalblocks; } template <class T> void SumReduce(T *d_input, T *d_output, size_t n, int blocks, int threads) { dim3 dimBlock = dim3(threads); dim3 dimGrid = dim3(blocks); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch if (IsPow2(n)) switch (threads) { case 512: SumKernel<T, 512, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 256: SumKernel<T, 256, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 128: SumKernel<T, 128, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 64: SumKernel<T, 64, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 32: SumKernel<T, 32, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 16: SumKernel<T, 16, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 8: SumKernel<T, 8, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 4: SumKernel<T, 4, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 2: SumKernel<T, 2, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 1: SumKernel<T, 1, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; } else switch (threads) { case 512: SumKernel<T, 512, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 256: SumKernel<T, 256, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 128: SumKernel<T, 128, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 64: SumKernel<T, 64, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 32: SumKernel<T, 32, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 16: SumKernel<T, 16, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 8: SumKernel<T, 8, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 4: SumKernel<T, 4, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 2: SumKernel<T, 2, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 1: SumKernel<T, 1, false> << <dimGrid, 
dimBlock, smemSize >> > (d_input, d_output, n); break; } hipStreamQuery(0); } template <class T> void d_Sum(T* d_input, T* d_output, size_t n, int batch) { if (n <= 0) return; int maxblocks = 512; int numblocks = 0; int numthreads = 0; GetNumBlocksAndThreads(n, numblocks, numthreads, maxblocks); T* d_intermediate; hipMalloc((void**)&d_intermediate, numblocks * batch * sizeof(T)); //T* h_intermediate = (T*)malloc(numblocks * sizeof(T)); for (int b = 0; b < batch; b++) { SumReduce<T>(d_input + n * b, d_intermediate + numblocks * b, n, numblocks, numthreads); /*hipMemcpy(h_intermediate, d_intermediate, numblocks * sizeof(T), hipMemcpyDeviceToHost); T result = h_intermediate[0]; T c = 0, y, t; for (int i = 1; i < numblocks; i++) { y = h_intermediate[i] - c; t = result + y; c = (t - result) - y; result = t; } hipMemcpy(d_output + b, &result, sizeof(T), hipMemcpyHostToDevice);*/ } d_SumMonolithic(d_intermediate, d_output, numblocks, batch); //free(h_intermediate); hipFree(d_intermediate); } template void d_Sum<float>(float* d_input, float* d_output, size_t n, int batch); template void d_Sum<double>(double* d_input, double* d_output, size_t n, int batch); template void d_Sum<int>(int* d_input, int* d_output, size_t n, int batch); template <class T> void d_SumMonolithic(T* d_input, T* d_output, int n, int batch) { for (int b = 0; b < batch; b += 32768) { dim3 grid = dim3(min(batch - b, 32768)); SumMonolithicKernel << <grid, MonoTpB >> > (d_input + n * b, d_output + b, n); } } template void d_SumMonolithic<float>(float* d_input, float* d_output, int n, int batch); template void d_SumMonolithic<float2>(float2* d_input, float2* d_output, int n, int batch); template void d_SumMonolithic<double>(double* d_input, double* d_output, int n, int batch); template void d_SumMonolithic<int>(int* d_input, int* d_output, int n, int batch); template <class T> void d_SumMonolithic(T* d_input, T* d_output, tfloat* d_mask, int n, int batch) { if (d_mask != NULL) for (int b = 0; b < batch; b += 32768) { dim3 grid = dim3(min(batch - b, 32768)); SumMonolithicMaskedKernel << <grid, MonoTpB >> > (d_input + n * b, d_output + b, d_mask + n * b, n); } else d_SumMonolithic(d_input, d_output, n, batch); } template void d_SumMonolithic<float>(float* d_input, float* d_output, tfloat* d_mask, int n, int batch); template void d_SumMonolithic<double>(double* d_input, double* d_output, tfloat* d_mask, int n, int batch); template void d_SumMonolithic<int>(int* d_input, int* d_output, tfloat* d_mask, int n, int batch); //////////////// //CUDA kernels// //////////////// //Slightly modified version of the reduce kernel from CUDA SDK 5.5 template <class T, uint blockSize, bool nIsPow2> __global__ void SumKernel(T* d_input, T* d_output, size_t n) { __shared__ T sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory uint tid = threadIdx.x; size_t i = blockIdx.x * blockSize * 2 + threadIdx.x; uint gridSize = blockSize * 2 * gridDim.x; T mySum = 0; T c = 0, y, t, val; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { val = d_input[i]; y = val - c; t = mySum + y; c = (t - mySum) - y; mySum = t; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) { val = d_input[i + blockSize]; y = val - c; t = mySum + y; c = (t - mySum) - y; mySum = t; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { T* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; __syncthreads(); } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; __syncthreads(); } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; __syncthreads(); } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; __syncthreads(); } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; __syncthreads(); } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; __syncthreads(); } } // write result for this block to global mem if (tid == 0) d_output[blockIdx.x] = sdata[0]; } template <class T> __global__ void SumMonolithicKernel(T* d_input, T* d_output, int n) { __shared__ T sums[MonoTpB]; d_input += n * blockIdx.x; T result = 0; T c = 0, y, t; for (int id = threadIdx.x; id < n; id += blockDim.x) { y = d_input[id] - c; t = result + y; c = (t - result) - y; result = t; } sums[threadIdx.x] = result; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) { y = sums[i] - c; t = result + y; c = (t - result) - y; result = t; } d_output[blockIdx.x] = result; } } template<> __global__ void SumMonolithicKernel<float2>(float2* d_input, float2* d_output, int n) { __shared__ float2 sums[MonoTpB]; d_input += n * blockIdx.x; float2 result = make_float2(0.0f); float2 c = make_float2(0.0f, 0.0f), y, t; for (int id = threadIdx.x; id < n; id += blockDim.x) { y = d_input[id] - c; t = result + y; c = (t - result) - y; result = t; } sums[threadIdx.x] = result; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) { y = sums[i] - c; t = result + y; c = (t - result) - y; result = t; } d_output[blockIdx.x] = result; } } template <class T> __global__ void SumMonolithicMaskedKernel(T* d_input, T* d_output, tfloat* d_mask, int n) { __shared__ T sums[MonoTpB]; d_input += n * blockIdx.x; d_mask += n * blockIdx.x; T result = 0; T c = 0, y, t; for (int id = threadIdx.x; id < n; id += blockDim.x) { y = d_input[id] * d_mask[id] - c; t = result + y; c = (t - result) - y; result = t; } sums[threadIdx.x] = result; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) { y = sums[i] - c; t = result + y; c = (t - result) - y; result = t; } d_output[blockIdx.x] = result; } } }
5315df15ebccade085771397100d11498aaa341e.cu
#include "Prerequisites.cuh" #include "Generics.cuh" namespace gtom { #define MonoTpB 192 //////////////////////////// //CUDA kernel declarations// //////////////////////////// template <class T, uint blockSize, bool nIsPow2> __global__ void SumKernel(T* d_input, T* d_output, size_t n); template <class T> __global__ void SumMonolithicKernel(T* d_input, T* d_output, int n); template <class T> __global__ void SumMonolithicMaskedKernel(T* d_input, T* d_output, tfloat* d_mask, int n); /////// //Sum// /////// void GetNumBlocksAndThreads(size_t n, int &blocks, int &threads, int maxblocks) { //get device capability, to avoid block/grid size excceed the upbound cudaDeviceProp prop; int device; cudaGetDevice(&device); cudaGetDeviceProperties(&prop, device); size_t maxthreads = 512; threads = (int)((n < maxthreads * 2) ? NextPow2((n + 1) / 2) : maxthreads); size_t totalblocks = (n + (threads * 2 - 1)) / (threads * 2); totalblocks = tmin((size_t)maxblocks, totalblocks); blocks = (int)totalblocks; } template <class T> void SumReduce(T *d_input, T *d_output, size_t n, int blocks, int threads) { dim3 dimBlock = dim3(threads); dim3 dimGrid = dim3(blocks); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch if (IsPow2(n)) switch (threads) { case 512: SumKernel<T, 512, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 256: SumKernel<T, 256, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 128: SumKernel<T, 128, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 64: SumKernel<T, 64, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 32: SumKernel<T, 32, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 16: SumKernel<T, 16, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 8: SumKernel<T, 8, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 4: SumKernel<T, 4, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 2: SumKernel<T, 2, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 1: SumKernel<T, 1, true> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; } else switch (threads) { case 512: SumKernel<T, 512, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 256: SumKernel<T, 256, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 128: SumKernel<T, 128, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 64: SumKernel<T, 64, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 32: SumKernel<T, 32, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 16: SumKernel<T, 16, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 8: SumKernel<T, 8, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 4: SumKernel<T, 4, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 2: SumKernel<T, 2, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; case 1: SumKernel<T, 1, false> << <dimGrid, dimBlock, smemSize >> > (d_input, d_output, n); break; } cudaStreamQuery(0); } template 
<class T> void d_Sum(T* d_input, T* d_output, size_t n, int batch) { if (n <= 0) return; int maxblocks = 512; int numblocks = 0; int numthreads = 0; GetNumBlocksAndThreads(n, numblocks, numthreads, maxblocks); T* d_intermediate; cudaMalloc((void**)&d_intermediate, numblocks * batch * sizeof(T)); //T* h_intermediate = (T*)malloc(numblocks * sizeof(T)); for (int b = 0; b < batch; b++) { SumReduce<T>(d_input + n * b, d_intermediate + numblocks * b, n, numblocks, numthreads); /*cudaMemcpy(h_intermediate, d_intermediate, numblocks * sizeof(T), cudaMemcpyDeviceToHost); T result = h_intermediate[0]; T c = 0, y, t; for (int i = 1; i < numblocks; i++) { y = h_intermediate[i] - c; t = result + y; c = (t - result) - y; result = t; } cudaMemcpy(d_output + b, &result, sizeof(T), cudaMemcpyHostToDevice);*/ } d_SumMonolithic(d_intermediate, d_output, numblocks, batch); //free(h_intermediate); cudaFree(d_intermediate); } template void d_Sum<float>(float* d_input, float* d_output, size_t n, int batch); template void d_Sum<double>(double* d_input, double* d_output, size_t n, int batch); template void d_Sum<int>(int* d_input, int* d_output, size_t n, int batch); template <class T> void d_SumMonolithic(T* d_input, T* d_output, int n, int batch) { for (int b = 0; b < batch; b += 32768) { dim3 grid = dim3(min(batch - b, 32768)); SumMonolithicKernel << <grid, MonoTpB >> > (d_input + n * b, d_output + b, n); } } template void d_SumMonolithic<float>(float* d_input, float* d_output, int n, int batch); template void d_SumMonolithic<float2>(float2* d_input, float2* d_output, int n, int batch); template void d_SumMonolithic<double>(double* d_input, double* d_output, int n, int batch); template void d_SumMonolithic<int>(int* d_input, int* d_output, int n, int batch); template <class T> void d_SumMonolithic(T* d_input, T* d_output, tfloat* d_mask, int n, int batch) { if (d_mask != NULL) for (int b = 0; b < batch; b += 32768) { dim3 grid = dim3(min(batch - b, 32768)); SumMonolithicMaskedKernel << <grid, MonoTpB >> > (d_input + n * b, d_output + b, d_mask + n * b, n); } else d_SumMonolithic(d_input, d_output, n, batch); } template void d_SumMonolithic<float>(float* d_input, float* d_output, tfloat* d_mask, int n, int batch); template void d_SumMonolithic<double>(double* d_input, double* d_output, tfloat* d_mask, int n, int batch); template void d_SumMonolithic<int>(int* d_input, int* d_output, tfloat* d_mask, int n, int batch); //////////////// //CUDA kernels// //////////////// //Slightly modified version of the reduce kernel from CUDA SDK 5.5 template <class T, uint blockSize, bool nIsPow2> __global__ void SumKernel(T* d_input, T* d_output, size_t n) { __shared__ T sdata[blockSize]; // perform first level of reduction, // reading from global memory, writing to shared memory uint tid = threadIdx.x; size_t i = blockIdx.x * blockSize * 2 + threadIdx.x; uint gridSize = blockSize * 2 * gridDim.x; T mySum = 0; T c = 0, y, t, val; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { val = d_input[i]; y = val - c; t = mySum + y; c = (t - mySum) - y; mySum = t; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) { val = d_input[i + blockSize]; y = val - c; t = mySum + y; c = (t - mySum) - y; mySum = t; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { T* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; __syncthreads(); } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; __syncthreads(); } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; __syncthreads(); } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; __syncthreads(); } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; __syncthreads(); } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; __syncthreads(); } } // write result for this block to global mem if (tid == 0) d_output[blockIdx.x] = sdata[0]; } template <class T> __global__ void SumMonolithicKernel(T* d_input, T* d_output, int n) { __shared__ T sums[MonoTpB]; d_input += n * blockIdx.x; T result = 0; T c = 0, y, t; for (int id = threadIdx.x; id < n; id += blockDim.x) { y = d_input[id] - c; t = result + y; c = (t - result) - y; result = t; } sums[threadIdx.x] = result; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) { y = sums[i] - c; t = result + y; c = (t - result) - y; result = t; } d_output[blockIdx.x] = result; } } template<> __global__ void SumMonolithicKernel<float2>(float2* d_input, float2* d_output, int n) { __shared__ float2 sums[MonoTpB]; d_input += n * blockIdx.x; float2 result = make_float2(0.0f); float2 c = make_float2(0.0f, 0.0f), y, t; for (int id = threadIdx.x; id < n; id += blockDim.x) { y = d_input[id] - c; t = result + y; c = (t - result) - y; result = t; } sums[threadIdx.x] = result; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) { y = sums[i] - c; t = result + y; c = (t - result) - y; result = t; } d_output[blockIdx.x] = result; } } template <class T> __global__ void SumMonolithicMaskedKernel(T* d_input, T* d_output, tfloat* d_mask, int n) { __shared__ T sums[MonoTpB]; d_input += n * blockIdx.x; d_mask += n * blockIdx.x; T result = 0; T c = 0, y, t; for (int id = threadIdx.x; id < n; id += blockDim.x) { y = d_input[id] * d_mask[id] - c; t = result + y; c = (t - result) - y; result = t; } sums[threadIdx.x] = result; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < blockDim.x; i++) { y = sums[i] - c; t = result + y; c = (t - result) - y; result = t; } d_output[blockIdx.x] = result; } } }
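The y/t/c pattern repeated per thread in SumKernel, SumMonolithicKernel and SumMonolithicMaskedKernel above is Kahan (compensated) summation. A minimal host-side sketch of the same idea, for reference (plain C++, not part of the library):

#include <cstddef>

// Compensated summation: c accumulates the rounding error lost in each addition
// and feeds it back into the next one, exactly as the kernels above do per thread.
float kahan_sum(const float *data, size_t n)
{
    float sum = 0.0f, c = 0.0f;
    for (size_t i = 0; i < n; ++i)
    {
        float y = data[i] - c;  // input corrected by the running error term
        float t = sum + y;      // candidate new sum
        c = (t - sum) - y;      // rounding error of this step
        sum = t;
    }
    return sum;
}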
913c628c0776a9433f41678a18bc01a8681cc15a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "myFirstKernel.hip"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( myFirstKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( myFirstKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( myFirstKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, );
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
913c628c0776a9433f41678a18bc01a8681cc15a.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "myFirstKernel.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            myFirstKernel<<<gridBlock,threadBlock>>>();
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                myFirstKernel<<<gridBlock,threadBlock>>>();
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                myFirstKernel<<<gridBlock,threadBlock>>>();
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
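One caveat about the timing loop in the pair above: kernel launches are asynchronous, so the steady_clock difference largely measures enqueue time unless the device is synchronized before the end timestamp. A sketch of a timing helper that includes execution time (an assumed refactoring, not the original code):

#include <chrono>
#include <cuda_runtime.h>

// Time n launches of a kernel and wait for them to finish before reading the clock.
// 'launch' stands in for the kernel invocation used in the benchmark above.
template <typename Launch>
float time_launches_us(Launch launch, int n)
{
    using namespace std::chrono;
    auto start = steady_clock::now();
    for (int i = 0; i < n; ++i) launch();
    cudaDeviceSynchronize();  // wait for the queued kernels to complete
    auto end = steady_clock::now();
    return duration_cast<duration<float, microseconds::period> >(end - start).count();
}

Called as, e.g., time_launches_us([&]{ myFirstKernel<<<gridBlock,threadBlock>>>(); }, 1000).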
11aa63543512823cc104b932ea9a4b34f11bba42.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <hip/hip_runtime.h> #include "l1d.h" #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 80 #define LINE_SIZE 128 #define SETS 4 #define ASSOC 256 // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; // Functions void CleanupResources(void); void RandomInit(int*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions #define CONFIG 50 // Device code __global__ static void PowerKernal(int* A, int* C, int iterations){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int); //32768 unsigned j=0, k=0; int m_sum=0; // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs #if (CONFIG==5 || CONFIG==10 || CONFIG==50 || CONFIG==100) for(k=0; k<iterations; ++k){ for(j=0; (j + (THREADS_PER_BLOCK*CONFIG)) < size; j+=THREADS_PER_BLOCK){ #if CONFIG==5 ld5_add5(m_sum, A, j); #elif CONFIG==10 ld10(m_sum, A, j); #elif CONFIG==50 ld50_add50(m_sum, A, j); #elif CONFIG==100 ld100(m_sum, A, j); #endif } } #else for(k=0; k<iterations; ++k){ for(j=0; j < size; j+=THREADS_PER_BLOCK){ } } #endif C[tid]=m_sum; } // Host code int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = (LINE_SIZE*ASSOC*SETS)/sizeof(int); //32768 size_t size = N * sizeof(int); // 128kB (l1d size with shmem = 0 bytes) // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); // h_B = (int*)malloc(size); // if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); // RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); // checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); // checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, 
iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
11aa63543512823cc104b932ea9a4b34f11bba42.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes CUDA #include <cuda_runtime.h> #include "l1d.h" #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 80 #define LINE_SIZE 128 #define SETS 4 #define ASSOC 256 // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; // Functions void CleanupResources(void); void RandomInit(int*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions #define CONFIG 50 // Device code __global__ static void PowerKernal(int* A, int* C, int iterations){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int); //32768 unsigned j=0, k=0; int m_sum=0; // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs #if (CONFIG==5 || CONFIG==10 || CONFIG==50 || CONFIG==100) for(k=0; k<iterations; ++k){ for(j=0; (j + (THREADS_PER_BLOCK*CONFIG)) < size; j+=THREADS_PER_BLOCK){ #if CONFIG==5 ld5_add5(m_sum, A, j); #elif CONFIG==10 ld10(m_sum, A, j); #elif CONFIG==50 ld50_add50(m_sum, A, j); #elif CONFIG==100 ld100(m_sum, A, j); #endif } } #else for(k=0; k<iterations; ++k){ for(j=0; j < size; j+=THREADS_PER_BLOCK){ } } #endif C[tid]=m_sum; } // Host code int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = (LINE_SIZE*ASSOC*SETS)/sizeof(int); //32768 size_t size = N * sizeof(int); // 128kB (l1d size with shmem = 0 bytes) // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); // h_B = (int*)malloc(size); // if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); // RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); // checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); // checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); 
checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
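A note on RandomInit in the pair above: rand() / RAND_MAX is evaluated in integer arithmetic, so every element becomes 0 except in the rare case that rand() returns exactly RAND_MAX; the cache benchmark still runs, but the data is not actually random. A sketch of a fill that varies (hypothetical alternative, not the author's code):

#include <stdlib.h>

// Fill with small varying integers instead of the constant 0 produced by
// integer division of rand() by RAND_MAX.
void RandomInitVaried(int *data, int n)
{
    for (int i = 0; i < n; ++i)
        data[i] = rand() % 256;
}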
0e1eb105eb0c6fac24f6446ea230fbc3792d6b13.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <string>
using namespace std;

int main() {
    int driver_version = 0, runtime_version = 0;
    hipDriverGetVersion(&driver_version);
    hipRuntimeGetVersion(&runtime_version);
    printf("Driver Version: %d\n Runtime Version: %d\n",
           driver_version, runtime_version);

    int nDevices;
    hipGetDeviceCount(&nDevices);

    for (int i = 0; i < nDevices; i++) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);

        int cudaCores = 0;
        int SM = prop.multiProcessorCount;
        int major = prop.major;
        int minor = prop.minor;
        string arch = "";
        switch (major) {
        case 1:
            arch = "TESLA";
            cudaCores = 8;
            break;
        case 2:
            arch = "FERMI";
            if (minor == 0) cudaCores = 32;
            else cudaCores = 48;
            break;
        case 3:
            arch = "KEPLER";
            cudaCores = 192;
            break;
        case 5:
            arch = "MAXWELL";
            cudaCores = 128;
            break;
        case 6:
            arch = "PASCAL";
            if ((minor == 1) || (minor == 2)) cudaCores = 128;
            else if (minor == 0) cudaCores = 64;
            else printf("Unknown device type\n");
            break;
        case 7:
            if ((minor == 0) || (minor == 2)) {
                arch = "VOLTA";
                cudaCores = 384; //tensorCores = 48;
            }
            if (minor == 5) arch = "TURING";
            if ((minor == 0) || (minor == 5)) cudaCores = 64;
            else printf("Unknown device type\n");
            break;
        case 8:
            arch = "AMPERE";
            if (minor == 0) cudaCores = 64;
            else printf("Unknown device type\n");
            break;
        default: // unknown architecture
            cudaCores = 0;
            printf("!!!!! unknown device !!!!!\n");
        }

        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        cout << " Architecture: " << arch << endl;
        printf(" Compute Capability: %d.%d\n", major, minor);
        printf(" MultiProcessors: %d\n", SM);
        printf(" CUDA Cores (%dx%d): %d\n", cudaCores, SM, cudaCores*SM);
        printf(" GlobalMemory (total): %zu MiB\n", prop.totalGlobalMem/(1024*1024));
        printf(" ConstMemory (total): %zu KiB\n", prop.totalConstMem/1024);
        printf(" sharedMemPerMultiprocessor: %zu\n", prop.sharedMemPerMultiprocessor);
        printf(" regsPerMultiprocessor: %d\n", prop.regsPerMultiprocessor);
        printf(" maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
        printf(" sharedMemPerBlock: %zu\n", prop.sharedMemPerBlock);
        printf(" regsPerBlock: %d\n", prop.regsPerBlock);
        printf(" maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
        printf(" maxThreadsDim:\n");
        printf(" x = %d\n", prop.maxThreadsDim[0]);
        printf(" y = %d\n", prop.maxThreadsDim[1]);
        printf(" z = %d\n", prop.maxThreadsDim[2]);
        printf(" maxGridSize:\n");
        printf(" x = %d\n", prop.maxGridSize[0]);
        printf(" y = %d\n", prop.maxGridSize[1]);
        printf(" z = %d\n", prop.maxGridSize[2]);
        printf(" warpSize: %d\n", prop.warpSize);
        printf(" memPitch: %zu\n", prop.memPitch);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
0e1eb105eb0c6fac24f6446ea230fbc3792d6b13.cu
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <string>

using namespace std;

int main() {
  int driver_version = 0, runtime_version = 0;
  cudaDriverGetVersion(&driver_version);
  cudaRuntimeGetVersion(&runtime_version);
  printf("Driver Version: %d\n Runtime Version: %d\n",
         driver_version, runtime_version);

  int nDevices;
  cudaGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);

    int cudaCores = 0;
    int SM = prop.multiProcessorCount;
    int major = prop.major;
    int minor = prop.minor;
    string arch = "";
    switch (major) {
      case 1:
        arch = "TESLA";
        cudaCores = 8;
        break;
      case 2:
        arch = "FERMI";
        if (minor == 0) cudaCores = 32;
        else cudaCores = 48;
        break;
      case 3:
        arch = "KEPLER";
        cudaCores = 192;
        break;
      case 5:
        arch = "MAXWELL";
        cudaCores = 128;
        break;
      case 6:
        arch = "PASCAL";
        if ((minor == 1) || (minor == 2)) cudaCores = 128;
        else if (minor == 0) cudaCores = 64;
        else printf("Unknown device type\n");
        break;
      case 7:
        if ((minor == 0) || (minor == 2)) {
          arch = "VOLTA";
          //tensorCores = 48;
        }
        if (minor == 5) arch = "TURING";
        if ((minor == 0) || (minor == 2) || (minor == 5)) cudaCores = 64;
        else printf("Unknown device type\n");
        break;
      case 8:
        arch = "AMPERE";
        if (minor == 0) cudaCores = 64;
        else printf("Unknown device type\n");
        break;
      default: // unknown architecture
        cudaCores = 0;
        printf("!!!!! unknown device !!!!!\n");
    }
    printf("Device Number: %d\n", i);
    printf(" Device name: %s\n", prop.name);
    cout << " Architecture: " << arch << endl;
    printf(" Compute Capability: %d.%d\n", major, minor);
    printf(" MultiProcessors: %d\n", SM);
    printf(" CUDA Cores (%dx%d): %d\n", cudaCores, SM, cudaCores*SM);
    printf(" GlobalMemory (total): %zu MiB\n", prop.totalGlobalMem/(1024*1024));
    printf(" ConstMemory (total): %zu KiB\n", prop.totalConstMem/1024);
    printf(" sharedMemPerMultiprocessor: %zu\n", prop.sharedMemPerMultiprocessor);
    printf(" regsPerMultiprocessor: %d\n", prop.regsPerMultiprocessor);
    printf(" maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
    printf(" sharedMemPerBlock: %zu\n", prop.sharedMemPerBlock);
    printf(" regsPerBlock: %d\n", prop.regsPerBlock);
    printf(" maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
    printf(" maxThreadsDim:\n");
    printf("  x = %d\n", prop.maxThreadsDim[0]);
    printf("  y = %d\n", prop.maxThreadsDim[1]);
    printf("  z = %d\n", prop.maxThreadsDim[2]);
    printf(" maxGridSize:\n");
    printf("  x = %d\n", prop.maxGridSize[0]);
    printf("  y = %d\n", prop.maxGridSize[1]);
    printf("  z = %d\n", prop.maxGridSize[2]);
    printf(" warpSize: %d\n", prop.warpSize);
    printf(" memPitch: %zu\n", prop.memPitch);
    printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
    printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
    printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
           2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
  }
}
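Both the HIP and CUDA versions of this device-query program ignore the error codes returned by the runtime calls. A minimal sketch of a checking macro for the CUDA variant (the macro name CHECK_CUDA is an arbitrary choice; a HIP build would use the matching hip* calls):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message if a CUDA runtime call fails.
#define CHECK_CUDA(call)                                          \
  do {                                                            \
    cudaError_t err__ = (call);                                   \
    if (err__ != cudaSuccess) {                                   \
      fprintf(stderr, "CUDA error %s at %s:%d: %s\n",             \
              cudaGetErrorName(err__), __FILE__, __LINE__,        \
              cudaGetErrorString(err__));                         \
      exit(EXIT_FAILURE);                                         \
    }                                                             \
  } while (0)

// Usage in the query loop:
//   CHECK_CUDA(cudaGetDeviceCount(&nDevices));
//   CHECK_CUDA(cudaGetDeviceProperties(&prop, i));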
838b7badaf16e30529dba5ff1933fbbd7eec473f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv3/highgui/highgui.hpp>
#include <cstdio>
#include <cassert>

#include "highgui.h"
#include "gpu_util.h"

void disp_gcube(const std::string &window_name, gcube &image) {
  cv::namedWindow(window_name);
  cv::Mat I = image.cv_mat();
  cv::imshow(window_name, I);
}

void disp_wait(void) {
  cv::waitKey(0);
}

int disp_keyPressed(void) {
  return cv::waitKey(30);
}

__global__ void GPU_rgb2gray(float *G, float *F, int n_rows, int n_cols) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  int i = blockIdx.y * blockDim.y + threadIdx.y;
  if (i >= n_rows || j >= n_cols) {
    return;
  }
  float red = F[IJK2C(i, j, 0, n_rows, n_cols)];
  float green = F[IJK2C(i, j, 1, n_rows, n_cols)];
  float blue = F[IJK2C(i, j, 2, n_rows, n_cols)];
  G[IJ2C(i, j, n_rows)] = red * 0.3f + green * 0.6f + blue * 0.1f;
}

gcube gpu_rgb2gray(const gcube &image) {
  assert(image.n_slices == 3);
  gcube G(image.n_rows, image.n_cols, 1);
  dim3 gridSize((image.n_cols-1)/16+1, (image.n_rows-1)/16+1, 1);
  dim3 blockSize(16, 16, 1);
  hipLaunchKernelGGL(( GPU_rgb2gray), dim3(gridSize), dim3(blockSize), 0, 0,
      G.d_pixels, image.d_pixels,
      image.n_rows, image.n_cols);
  checkCudaErrors(hipGetLastError());
  return G;
}
838b7badaf16e30529dba5ff1933fbbd7eec473f.cu
#include <opencv3/highgui/highgui.hpp>
#include <cstdio>
#include <cassert>

#include "highgui.h"
#include "gpu_util.h"

void disp_gcube(const std::string &window_name, gcube &image) {
  cv::namedWindow(window_name);
  cv::Mat I = image.cv_mat();
  cv::imshow(window_name, I);
}

void disp_wait(void) {
  cv::waitKey(0);
}

int disp_keyPressed(void) {
  return cv::waitKey(30);
}

__global__ void GPU_rgb2gray(float *G, float *F, int n_rows, int n_cols) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  int i = blockIdx.y * blockDim.y + threadIdx.y;
  if (i >= n_rows || j >= n_cols) {
    return;
  }
  float red = F[IJK2C(i, j, 0, n_rows, n_cols)];
  float green = F[IJK2C(i, j, 1, n_rows, n_cols)];
  float blue = F[IJK2C(i, j, 2, n_rows, n_cols)];
  G[IJ2C(i, j, n_rows)] = red * 0.3f + green * 0.6f + blue * 0.1f;
}

gcube gpu_rgb2gray(const gcube &image) {
  assert(image.n_slices == 3);
  gcube G(image.n_rows, image.n_cols, 1);
  dim3 gridSize((image.n_cols-1)/16+1, (image.n_rows-1)/16+1, 1);
  dim3 blockSize(16, 16, 1);
  GPU_rgb2gray<<<gridSize, blockSize>>>(
      G.d_pixels, image.d_pixels,
      image.n_rows, image.n_cols);
  checkCudaErrors(cudaGetLastError());
  return G;
}
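The GPU_rgb2gray kernel depends on the IJ2C and IJK2C macros from gpu_util.h, which are not reproduced in this pair. The definitions below are an assumption consistent with how the kernel indexes (column-major with n_rows as the leading dimension and slices stored back to back); the actual macros in the repository may differ:

// Assumed column-major (Armadillo-style) linear indices.
#define IJ2C(i, j, n_rows)             ((j) * (n_rows) + (i))
#define IJK2C(i, j, k, n_rows, n_cols) ((k) * (n_rows) * (n_cols) + (j) * (n_rows) + (i))

With this layout, gridSize((n_cols-1)/16+1, (n_rows-1)/16+1, 1) simply rounds the image dimensions up to whole 16x16 blocks, which is why the kernel needs the bounds check on i and j for partially filled edge tiles.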
b423930e7f54b0e8a80af9f79064580380fc8fb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/communicator/data_parallel_communicator.hpp> #include <algorithm> #include <cstdlib> #include <memory> namespace nbla { using std::make_shared; template <typename T> __global__ void kernel_divide_inplace(const int size, const int n_devices, T *dw) { NBLA_CUDA_KERNEL_LOOP(i, size) { dw[i] /= n_devices; } } template <typename T> DataParallelCommunicatorNccl<T>::DataParallelCommunicatorNccl( const Context &ctx) : DataParallelCommunicator<T>(ctx) {} template <typename T> DataParallelCommunicatorNccl<T>::~DataParallelCommunicatorNccl() { if (this->initialized_) { for (int i = 0; i < device_ids_.size(); ++i) { ncclCommDestroy(comms_[i]); NBLA_CUDA_CHECK(hipStreamDestroy(streams_[i])); } } } template <typename T> void DataParallelCommunicatorNccl<T>::init() { Communicator::init(); try { // Set gpu information for (auto ctx : this->contexts_) { this->device_ids_.push_back(std::stoi(ctx.device_id)); } this->n_devices_ = this->device_ids_.size(); // Initialize stream and communicator for (int i = 0; i < n_devices_; ++i) { cuda_set_device(device_ids_[i]); // Stream hipStream_t stream; NBLA_CUDA_CHECK(hipStreamCreate(&stream)); streams_.push_back(stream); // NCCL Comm ncclComm_t comm; comms_.push_back(comm); } ncclResult_t res = ncclCommInitAll(comms_.data(), this->n_devices_, this->device_ids_.data()); if (res != 0) { NBLA_ERROR(error_code::target_specific, "ncclCommInitAll fails with %d"); } } catch (...) { this->initialized_ = false; } this->initialized_ = true; } template <typename T> void DataParallelCommunicatorNccl<T>::reduce( const vector<NdArrayPtr> &ndarray_list, int dst, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray, int dst, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allreduce(bool division, bool inplace) { // TODO: currently nnabla uses default stream for computation. // The following logic relies on that, so if nnabla uses another stream for // computation, // we have to issue null kernel to the default stream at the beginning of this // method // and at the end of this method for using the implicit synchronization // technique for // main thread not to wait for a result of a kernel call. if (inplace == true) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allreduce with out-of-place is only implemented.") } // Once sync to prevent the hang where the memcpy occurs during the allreduce. this->sync_all_params(); // 1. 
copy inside device for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto func_named_param = this->device_func_named_param_[i]; auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); Size_t type_size = sizeof(T); for (auto elm : func_named_param) { VariablePtr vp = elm.second; const T *dw = vp->get_grad_pointer<T>(ctx); auto n_param = vp->size(); hipMemcpyAsync(buff, dw, type_size * n_param, hipMemcpyDeviceToDevice, stream); buff += n_param; } } // 2. allreduce #ifdef NCCL_MAJOR ncclGroupStart(); #endif for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; // cuda_set_device(device_id); auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); ncclResult_t ret = ncclAllReduce(buff, buff, this->total_params_, ncclFloat, // TODO: address ncclFloat ncclSum, comm, 0); // use default stream if (ret != ncclSuccess) { NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.", ret); } } #ifdef NCCL_MAJOR ncclGroupEnd(); // wait_by_streams_synchronization(); #endif // 3. divide if (division) { for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, this->total_params_, n_devices_, buff); } } // 4. copy back inside device for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto func_named_param = this->device_func_named_param_[i]; auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? 
make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); Size_t type_size = sizeof(T); for (auto elm : func_named_param) { VariablePtr vp = elm.second; T *dw = vp->cast_grad_and_get_pointer<T>(ctx); auto n_param = vp->size(); hipMemcpyAsync(dw, buff, type_size * n_param, hipMemcpyDeviceToDevice, stream); buff += n_param; } } } template <typename T> void DataParallelCommunicatorNccl<T>::all_reduce( const vector<NdArrayPtr> &ndarray_list, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::all_reduce(NdArrayPtr ndarray, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce_scatter( const vector<NdArrayPtr> &ndarray_list, NdArrayPtr ndarray, bool division, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce_scatter is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast( const vector<NdArrayPtr> &ndarray_list, int src, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray, int src, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::all_gather( NdArrayPtr ndarray, const vector<NdArrayPtr> &ndarray_list, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_gather is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce_async(bool division) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allreduce_async(bool division, bool inplace) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allreduce_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reducescatter_async(bool division) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reducescatter_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast_async() { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allgather_async() { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allgather_async is not implemented.") } template <typename T> vector<string> DataParallelCommunicatorNccl<T>::allowed_array_classes() { NBLA_ERROR(error_code::not_implemented, "Derived class of " "DataParallelCommunicatorNccl must " "implement allowed_array_classes().") } template <typename T> void DataParallelCommunicatorNccl<T>::wait_by_devices_synchronization() { for (int i = 0; i < device_ids_.size(); ++i) { cuda_device_synchronize(device_ids_[i]); } } template <typename T> void DataParallelCommunicatorNccl<T>::wait_by_streams_synchronization() { for (int i = 0; i < device_ids_.size(); ++i) { cuda_set_device(device_ids_[i]); NBLA_CUDA_CHECK(hipStreamSynchronize(streams_[i])); } } template <typename T> void DataParallelCommunicatorNccl<T>::divide_by_num_divices(bool division) { if (division) { for (int i = 0; i < device_ids_.size(); ++i) { auto device_id = device_ids_[i]; 
cuda_set_device(device_id); Context ctx = this->contexts_[i]; auto func_named_param = this->device_func_named_param_[i]; auto stream = streams_[i]; for (auto elm : func_named_param) { VariablePtr vp = elm.second; T *dw = vp->cast_grad_and_get_pointer<T>(ctx); auto n_param = vp->size(); NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param, n_devices_, dw); } } } } template <typename T> void DataParallelCommunicatorNccl<T>::sync_all_params() { for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; auto func_named_param = this->device_func_named_param_[i]; auto size = func_named_param.size(); for (auto elm : func_named_param) { // function-loop VariablePtr vp = elm.second; // If the arrays are different, output the warning. this->check_array_class(ctx, vp); // Sync vp->get_grad_pointer<T>(ctx); } } } template class DataParallelCommunicatorNccl<float>; }
b423930e7f54b0e8a80af9f79064580380fc8fb3.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/communicator/data_parallel_communicator.hpp> #include <algorithm> #include <cstdlib> #include <memory> namespace nbla { using std::make_shared; template <typename T> __global__ void kernel_divide_inplace(const int size, const int n_devices, T *dw) { NBLA_CUDA_KERNEL_LOOP(i, size) { dw[i] /= n_devices; } } template <typename T> DataParallelCommunicatorNccl<T>::DataParallelCommunicatorNccl( const Context &ctx) : DataParallelCommunicator<T>(ctx) {} template <typename T> DataParallelCommunicatorNccl<T>::~DataParallelCommunicatorNccl() { if (this->initialized_) { for (int i = 0; i < device_ids_.size(); ++i) { ncclCommDestroy(comms_[i]); NBLA_CUDA_CHECK(cudaStreamDestroy(streams_[i])); } } } template <typename T> void DataParallelCommunicatorNccl<T>::init() { Communicator::init(); try { // Set gpu information for (auto ctx : this->contexts_) { this->device_ids_.push_back(std::stoi(ctx.device_id)); } this->n_devices_ = this->device_ids_.size(); // Initialize stream and communicator for (int i = 0; i < n_devices_; ++i) { cuda_set_device(device_ids_[i]); // Stream cudaStream_t stream; NBLA_CUDA_CHECK(cudaStreamCreate(&stream)); streams_.push_back(stream); // NCCL Comm ncclComm_t comm; comms_.push_back(comm); } ncclResult_t res = ncclCommInitAll(comms_.data(), this->n_devices_, this->device_ids_.data()); if (res != 0) { NBLA_ERROR(error_code::target_specific, "ncclCommInitAll fails with %d"); } } catch (...) { this->initialized_ = false; } this->initialized_ = true; } template <typename T> void DataParallelCommunicatorNccl<T>::reduce( const vector<NdArrayPtr> &ndarray_list, int dst, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce(NdArrayPtr ndarray, int dst, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allreduce(bool division, bool inplace) { // TODO: currently nnabla uses default stream for computation. // The following logic relies on that, so if nnabla uses another stream for // computation, // we have to issue null kernel to the default stream at the beginning of this // method // and at the end of this method for using the implicit synchronization // technique for // main thread not to wait for a result of a kernel call. if (inplace == true) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allreduce with out-of-place is only implemented.") } // Once sync to prevent the hang where the memcpy occurs during the allreduce. this->sync_all_params(); // 1. 
copy inside device for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto func_named_param = this->device_func_named_param_[i]; auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); Size_t type_size = sizeof(T); for (auto elm : func_named_param) { VariablePtr vp = elm.second; const T *dw = vp->get_grad_pointer<T>(ctx); auto n_param = vp->size(); cudaMemcpyAsync(buff, dw, type_size * n_param, cudaMemcpyDeviceToDevice, stream); buff += n_param; } } // 2. allreduce #ifdef NCCL_MAJOR ncclGroupStart(); #endif for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; // cuda_set_device(device_id); auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); ncclResult_t ret = ncclAllReduce(buff, buff, this->total_params_, ncclFloat, // TODO: address ncclFloat ncclSum, comm, 0); // use default stream if (ret != ncclSuccess) { NBLA_ERROR(error_code::target_specific, "ncclAllReduce fails with %d.", ret); } } #ifdef NCCL_MAJOR ncclGroupEnd(); // wait_by_streams_synchronization(); #endif // 3. divide if (division) { for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, this->total_params_, n_devices_, buff); } } // 4. copy back inside device for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; cuda_set_device(device_id); auto func_named_param = this->device_func_named_param_[i]; auto comm = comms_[i]; auto stream = streams_[i]; shared_ptr<CudaCachedArray> arr_buff = // TODO: address 16 bits also here? 
make_shared<CudaCachedArray>(this->total_params_, get_dtype<T>(), ctx); T *buff = arr_buff->pointer<T>(); Size_t type_size = sizeof(T); for (auto elm : func_named_param) { VariablePtr vp = elm.second; T *dw = vp->cast_grad_and_get_pointer<T>(ctx); auto n_param = vp->size(); cudaMemcpyAsync(dw, buff, type_size * n_param, cudaMemcpyDeviceToDevice, stream); buff += n_param; } } } template <typename T> void DataParallelCommunicatorNccl<T>::all_reduce( const vector<NdArrayPtr> &ndarray_list, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::all_reduce(NdArrayPtr ndarray, bool division, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_reduce is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce_scatter( const vector<NdArrayPtr> &ndarray_list, NdArrayPtr ndarray, bool division, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce_scatter is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast( const vector<NdArrayPtr> &ndarray_list, int src, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast(NdArrayPtr ndarray, int src, bool inplace, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::all_gather( NdArrayPtr ndarray, const vector<NdArrayPtr> &ndarray_list, const string &group) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU all_gather is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reduce_async(bool division) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reduce_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allreduce_async(bool division, bool inplace) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allreduce_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::reducescatter_async(bool division) { NBLA_ERROR(error_code::not_implemented, "CUDA GPU reducescatter_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::bcast_async() { NBLA_ERROR(error_code::not_implemented, "CUDA GPU bcast_async is not implemented.") } template <typename T> void DataParallelCommunicatorNccl<T>::allgather_async() { NBLA_ERROR(error_code::not_implemented, "CUDA GPU allgather_async is not implemented.") } template <typename T> vector<string> DataParallelCommunicatorNccl<T>::allowed_array_classes() { NBLA_ERROR(error_code::not_implemented, "Derived class of " "DataParallelCommunicatorNccl must " "implement allowed_array_classes().") } template <typename T> void DataParallelCommunicatorNccl<T>::wait_by_devices_synchronization() { for (int i = 0; i < device_ids_.size(); ++i) { cuda_device_synchronize(device_ids_[i]); } } template <typename T> void DataParallelCommunicatorNccl<T>::wait_by_streams_synchronization() { for (int i = 0; i < device_ids_.size(); ++i) { cuda_set_device(device_ids_[i]); NBLA_CUDA_CHECK(cudaStreamSynchronize(streams_[i])); } } template <typename T> void DataParallelCommunicatorNccl<T>::divide_by_num_divices(bool division) { if (division) { for (int i = 0; i < device_ids_.size(); ++i) { auto device_id = device_ids_[i]; 
cuda_set_device(device_id); Context ctx = this->contexts_[i]; auto func_named_param = this->device_func_named_param_[i]; auto stream = streams_[i]; for (auto elm : func_named_param) { VariablePtr vp = elm.second; T *dw = vp->cast_grad_and_get_pointer<T>(ctx); auto n_param = vp->size(); NBLA_CUDA_LAUNCH_KERNEL_IN_STREAM(kernel_divide_inplace, stream, n_param, n_devices_, dw); } } } } template <typename T> void DataParallelCommunicatorNccl<T>::sync_all_params() { for (int i = 0; i < device_ids_.size(); ++i) { // device-loop Context ctx = this->contexts_[i]; auto device_id = device_ids_[i]; auto func_named_param = this->device_func_named_param_[i]; auto size = func_named_param.size(); for (auto elm : func_named_param) { // function-loop VariablePtr vp = elm.second; // If the arrays are different, output the warning. this->check_array_class(ctx, vp); // Sync vp->get_grad_pointer<T>(ctx); } } } template class DataParallelCommunicatorNccl<float>; }
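Stripped of the nnabla buffer management above, the underlying single-process, multi-GPU pattern is ncclCommInitAll once, then one ncclAllReduce per device inside an ncclGroupStart/ncclGroupEnd pair. A self-contained sketch, assuming NCCL 2.x, with most error handling omitted for brevity:

#include <cuda_runtime.h>
#include <nccl.h>
#include <vector>

int main() {
  int n = 0;
  cudaGetDeviceCount(&n);
  std::vector<int> devs(n);
  for (int i = 0; i < n; ++i) devs[i] = i;

  // One communicator, one buffer and one stream per device.
  std::vector<ncclComm_t> comms(n);
  ncclCommInitAll(comms.data(), n, devs.data());

  const size_t count = 1 << 20;
  std::vector<float*> buf(n);
  std::vector<cudaStream_t> streams(n);
  for (int i = 0; i < n; ++i) {
    cudaSetDevice(i);
    cudaMalloc(&buf[i], count * sizeof(float));
    cudaMemset(buf[i], 0, count * sizeof(float));
    cudaStreamCreate(&streams[i]);
  }

  // Sum the buffers across devices, in place.
  ncclGroupStart();
  for (int i = 0; i < n; ++i)
    ncclAllReduce(buf[i], buf[i], count, ncclFloat, ncclSum,
                  comms[i], streams[i]);
  ncclGroupEnd();

  for (int i = 0; i < n; ++i) {
    cudaSetDevice(i);
    cudaStreamSynchronize(streams[i]);
    cudaFree(buf[i]);
    cudaStreamDestroy(streams[i]);
    ncclCommDestroy(comms[i]);
  }
  return 0;
}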
b2c6369054346f7d502e1db5aab7916de84cfba7.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/absval_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { template<typename Dtype> void AbsValLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int_tp count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); #endif // USE_ROCM } else { #ifdef USE_GREENTEA greentea_gpu_abs<Dtype>(this->device_->id(), count, (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (top_data), 0); #endif // USE_GREENTEA } } template<typename Dtype> void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int_tp count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM caffe_gpu_sign(count, bottom_data, bottom_diff); caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); #endif // USE_ROCM } else { #ifdef USE_GREENTEA greentea_gpu_sign<Dtype>(this->device_->id(), count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, 0); greentea_gpu_mul<Dtype>(this->device_->id(), count, (cl_mem) bottom_diff, 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); } // namespace caffe
b2c6369054346f7d502e1db5aab7916de84cfba7.cu
#include <vector> #include "caffe/layers/absval_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { template<typename Dtype> void AbsValLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int_tp count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); #endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_abs<Dtype>(this->device_->id(), count, (cl_mem) (bottom[0]->gpu_data()), 0, (cl_mem) (top_data), 0); #endif // USE_GREENTEA } } template<typename Dtype> void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int_tp count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_gpu_sign(count, bottom_data, bottom_diff); caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); #endif // USE_CUDA } else { #ifdef USE_GREENTEA greentea_gpu_sign<Dtype>(this->device_->id(), count, (cl_mem) bottom_data, 0, (cl_mem) bottom_diff, 0); greentea_gpu_mul<Dtype>(this->device_->id(), count, (cl_mem) bottom_diff, 0, (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(AbsValLayer); } // namespace caffe
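For reference, the backward pass above is the chain rule for y = |x|: dL/dx = sign(x) * dL/dy, which the CUDA branch builds from caffe_gpu_sign followed by caffe_gpu_mul. A standalone sketch of the same element-wise update as one fused kernel (illustrative only, not part of Caffe):

#include <cuda_runtime.h>

// bottom_diff[i] = sign(bottom_data[i]) * top_diff[i]
// (sign(0) is taken as 0, matching the subgradient convention used here).
__global__ void abs_backward(const int n, const float *bottom_data,
                             const float *top_diff, float *bottom_diff) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    float s = (bottom_data[i] > 0.f) - (bottom_data[i] < 0.f);
    bottom_diff[i] = s * top_diff[i];
  }
}

// Launch example: abs_backward<<<(n + 255) / 256, 256>>>(n, x, dy, dx);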
35a7fde91412c40d57d02e6d1d915edc86c669cf.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <host_defines.h> #include <string.h> #include <stdio.h> #include "cudainfo.h" #if TORCH_HIP_VERSION < 5050 #error CUDA 1.x - 5.0 are not supported any more! Please use CUDA Toolkit 5.5+ instead. #endif #define CZ_COPY_BUF_SIZE (16 * (1 << 20)) /*!< Transfer buffer size. */ #define CZ_COPY_LOOPS_NUM 8 /*!< Number of loops to run transfer test to. */ #define CZ_CALC_BLOCK_LOOPS 16 /*!< Number of loops to run calculation loop. */ #define CZ_CALC_BLOCK_SIZE 256 /*!< Size of instruction block. */ #define CZ_CALC_BLOCK_NUM 16 /*!< Number of instruction blocks in loop. */ #define CZ_CALC_OPS_NUM 2 /*!< Number of operations per one loop. */ #define CZ_CALC_LOOPS_NUM 8 /*!< Number of loops to run performance test to. */ #define CZ_DEF_WARP_SIZE 32 /*!< Default warp size value. */ #define CZ_DEF_THREADS_MAX 512 /*!< Default max threads value value. */ #define CZ_VER_STR_LEN 256 /*!< Version string length. */ /*! \brief Error handling of CUDA RT calls. */ #define CZ_CUDA_CALL(funcCall, errProc) \ { \ hipError_t errCode; \ if((errCode = (funcCall)) != hipSuccess) { \ printf("CUDA Error: %08x %s", errCode, hipGetErrorString(errCode)); \ errProc; \ } \ } /*! \brief Check how many CUDA-devices are present. \return number of CUDA-devices in case of success, \a 0 if no CUDA-devies were found. */ int CZCudaDeviceFound(void) { int count; CZ_CUDA_CALL(hipGetDeviceCount(&count), return 0); return count; } /*! \def ConvertSMVer2Cores(major, minor) \brief Get number of CUDA cores per multiprocessor. \arg[in] major GPU Architecture major version. \arg[in] minor GPU Architecture minor version. \returns 0 if GPU Architecture is unknown, or number of CUDA cores per multiprocessor. */ #define ConvertSMVer2Cores(major, minor) \ (((major) == 1)? ( /* Tesla */ \ ((minor) == 0)? 8: /* G80*/ \ ((minor) == 1)? 8: /* G8x, G9x */ \ ((minor) == 2)? 8: /* GT21x */ \ ((minor) == 3)? 8: /* GT200 */ \ 0): \ ((major) == 2)? ( /* Fermi */ \ ((minor) == 0)? 32: /* GF100, GF110 */ \ ((minor) == 1)? 48: /* GF10x, FG11x */ \ 0): \ ((major) == 3)? ( /* Kepler */ \ ((minor) == 0)? 192: /* GK10x */ \ ((minor) == 2)? 192: /* Tegra K1 */ \ ((minor) == 5)? 192: /* GK11x, GK208 */ \ ((minor) == 7)? 192: /* GK210 */ \ 0): \ ((major) == 5)? ( /* Maxwell */ \ ((minor) == 0)? 128: /* GM10X */ \ ((minor) == 2)? 128: /* GM20X */ \ ((minor) == 3)? 128: /* Tegra X1 */ \ 0): \ 0) void getValue1024(double value, char *valueStr, int prefix = 0, const char *unit = "B") { const int prefixBase = 1024; int resPrefix = prefix; static const char *prefixTab[9] = { "", /* prefixNothing */ "K", /* prefixK */ "M", /* prefixM */ "G", /* prefixG */ "T", /* prefixT */ "P", /* prefixP */ "E", /* prefixE */ "Z", /* prefixZ */ "Y", /* prefixY */ }; while((value > prefixBase) && (resPrefix < 9)) { value /= prefixBase; resPrefix++; } sprintf(valueStr, "%.2f %s%s", value, prefixTab[resPrefix], unit); } /*! \brief Local service data structure for bandwith calulations. * */ struct CZDeviceInfoBandLocalData { void *memHostPage; /*!< Pageable host memory. */ void *memHostPin; /*!< Pinned host memory. */ void *memDevice1; /*!< Device memory buffer 1. */ void *memDevice2; /*!< Device memory buffer 2. */ }; /*! \brief Set device for current thread. * */ int CZCudaCalcDeviceSelect( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { CZ_CUDA_CALL(hipSetDevice(info->num), return -1); return 0; } /*! 
\brief Allocate buffers for bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthAlloc( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { CZDeviceInfoBandLocalData *lData; if(info == NULL) return -1; if(info->band.localData == NULL) { //CZLog(CZLogLevelLow, "Alloc local buffers for %s.", info->deviceName); lData = (CZDeviceInfoBandLocalData*)malloc(sizeof(*lData)); if(lData == NULL) { return -1; } //CZLog(CZLogLevelLow, "Alloc host pageable for %s.", info->deviceName); lData->memHostPage = (void*)malloc(CZ_COPY_BUF_SIZE); if(lData->memHostPage == NULL) { free(lData); return -1; } //CZLog(CZLogLevelLow, "Host pageable is at 0x%08X.", lData->memHostPage); //CZLog(CZLogLevelLow, "Alloc host pinned for %s.", info->deviceName); CZ_CUDA_CALL(hipHostMalloc((void**)&lData->memHostPin, CZ_COPY_BUF_SIZE), free(lData->memHostPage); free(lData); return -1); //CZLog(CZLogLevelLow, "Host pinned is at 0x%08X.", lData->memHostPin); //CZLog(CZLogLevelLow, "Alloc device buffer 1 for %s.", info->deviceName); CZ_CUDA_CALL(hipMalloc((void**)&lData->memDevice1, CZ_COPY_BUF_SIZE), hipHostFree(lData->memHostPin); free(lData->memHostPage); free(lData); return -1); //CZLog(CZLogLevelLow, "Device buffer 1 is at 0x%08X.", lData->memDevice1); //CZLog(CZLogLevelLow, "Alloc device buffer 2 for %s.", info->deviceName); CZ_CUDA_CALL(hipMalloc((void**)&lData->memDevice2, CZ_COPY_BUF_SIZE), hipFree(lData->memDevice1); hipHostFree(lData->memHostPin); free(lData->memHostPage); free(lData); return -1); //CZLog(CZLogLevelLow, "Device buffer 2 is at 0x%08X.", lData->memDevice2); info->band.localData = (void*)lData; } return 0; } /*! \brief Free buffers for bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthFree( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { CZDeviceInfoBandLocalData *lData; if(info == NULL) return -1; lData = (CZDeviceInfoBandLocalData*)info->band.localData; if(lData != NULL) { //CZLog(CZLogLevelLow, "Free host pageable for %s.", info->deviceName); if(lData->memHostPage != NULL) free(lData->memHostPage); //CZLog(CZLogLevelLow, "Free host pinned for %s.", info->deviceName); if(lData->memHostPin != NULL) hipHostFree(lData->memHostPin); //CZLog(CZLogLevelLow, "Free device buffer 1 for %s.", info->deviceName); if(lData->memDevice1 != NULL) hipFree(lData->memDevice1); //CZLog(CZLogLevelLow, "Free device buffer 2 for %s.", info->deviceName); if(lData->memDevice2 != NULL) hipFree(lData->memDevice2); //CZLog(CZLogLevelLow, "Free local buffers for %s.", info->deviceName); free(lData); } info->band.localData = NULL; return 0; } /*! \brief Reset results of bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthReset( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; info->band.copyHDPage = 0; info->band.copyHDPin = 0; info->band.copyDHPage = 0; info->band.copyDHPin = 0; info->band.copyDD = 0; return 0; } #define CZ_COPY_MODE_H2D 0 /*!< Host to device data copy mode. */ #define CZ_COPY_MODE_D2H 1 /*!< Device to host data copy mode. */ #define CZ_COPY_MODE_D2D 2 /*!< Device to device data copy mode. */ /*! \brief Run data transfer bandwidth tests. * \return \a 0 in case of success, \a other is value in KiB/s. 
* */ static float CZCudaCalcDeviceBandwidthTestCommon ( struct CZDeviceInfo *info, /*!<[in,out] CUDA-device information. */ int mode, /*!<[in] Run bandwidth test in one of modes. */ int pinned /*!<[in] Use pinned \a (=1) memory buffer instead of pagable \a (=0). */ ) { CZDeviceInfoBandLocalData *lData; float timeMs = 0.0; float bandwidthKiBs = 0.0; hipEvent_t start; hipEvent_t stop; void *memHost; void *memDevice1; void *memDevice2; int i; if(info == NULL) return 0; CZ_CUDA_CALL(hipEventCreate(&start), return 0); CZ_CUDA_CALL(hipEventCreate(&stop), hipEventDestroy(start); return 0); lData = (CZDeviceInfoBandLocalData*)info->band.localData; memHost = pinned? lData->memHostPin: lData->memHostPage; memDevice1 = lData->memDevice1; memDevice2 = lData->memDevice2; /*CZLog(CZLogLevelLow, "Starting %s test (%s) on %s.", (mode == CZ_COPY_MODE_H2D)? "host to device": (mode == CZ_COPY_MODE_D2H)? "device to host": (mode == CZ_COPY_MODE_D2D)? "device to device": "unknown", pinned? "pinned": "pageable", info->deviceName);*/ for(i = 0; i < CZ_COPY_LOOPS_NUM; i++) { float loopMs = 0.0; CZ_CUDA_CALL(hipEventRecord(start, 0), hipEventDestroy(start); hipEventDestroy(stop); return 0); switch(mode) { case CZ_COPY_MODE_H2D: CZ_CUDA_CALL(hipMemcpy(memDevice1, memHost, CZ_COPY_BUF_SIZE, hipMemcpyHostToDevice), hipEventDestroy(start); hipEventDestroy(stop); return 0); break; case CZ_COPY_MODE_D2H: CZ_CUDA_CALL(hipMemcpy(memHost, memDevice2, CZ_COPY_BUF_SIZE, hipMemcpyDeviceToHost), hipEventDestroy(start); hipEventDestroy(stop); return 0); break; case CZ_COPY_MODE_D2D: CZ_CUDA_CALL(hipMemcpy(memDevice2, memDevice1, CZ_COPY_BUF_SIZE, hipMemcpyDeviceToDevice), hipEventDestroy(start); hipEventDestroy(stop); return 0); break; default: // WTF! hipEventDestroy(start); hipEventDestroy(stop); return 0; } CZ_CUDA_CALL(hipEventRecord(stop, 0), hipEventDestroy(start); hipEventDestroy(stop); return 0); CZ_CUDA_CALL(hipEventSynchronize(stop), hipEventDestroy(start); hipEventDestroy(stop); return 0); CZ_CUDA_CALL(hipEventElapsedTime(&loopMs, start, stop), hipEventDestroy(start); hipEventDestroy(stop); return 0); timeMs += loopMs; } //CZLog(CZLogLevelLow, "Test complete in %f ms.", timeMs); bandwidthKiBs = ( 1000 * (float)CZ_COPY_BUF_SIZE * (float)CZ_COPY_LOOPS_NUM ) / ( timeMs * (float)(1 << 10) ); hipEventDestroy(start); hipEventDestroy(stop); return bandwidthKiBs; } /*! \brief Run several bandwidth tests. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthTest( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { info->band.copyHDPage = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_H2D, 0); info->band.copyHDPin = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_H2D, 1); info->band.copyDHPage = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_D2H, 0); info->band.copyDHPin = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_D2H, 1); info->band.copyDD = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_D2D, 0); return 0; } /*! \brief Calculate bandwidth information about CUDA-device. * \return \a 0 in case of success, \a -1 in case of error. * */ int CZCudaCalcDeviceBandwidth( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. 
*/ ) { if(info == NULL) return -1; if (CZCudaCalcDeviceSelect(info) != 0) return -1; if(CZCudaCalcDeviceBandwidthReset(info) != 0) return -1; if(CZCudaCalcDeviceBandwidthAlloc(info) != 0) return -1; if(CZCudaCalcDeviceBandwidthTest(info) != 0) return -1; // CZCudaCalcDeviceBandwidthFree(info); return 0; } /*! \brief Cleanup after test and bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ int CZCudaCleanDevice( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; if(CZCudaCalcDeviceBandwidthFree(info) != 0) return -1; return 0; } /*! \brief Reset results of preformance calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDevicePerformanceReset( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; info->perf.calcFloat = 0; info->perf.calcDouble = 0; info->perf.calcInteger32 = 0; info->perf.calcInteger24 = 0; info->perf.calcInteger64 = 0; return 0; } /*! \brief 16 MAD instructions for float point test. * */ #define CZ_CALC_FMAD_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for float point test. * */ #define CZ_CALC_FMAD_256(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ /*! \brief 16 DMAD instructions for double-precision test. * */ #define CZ_CALC_DFMAD_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /* a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \ * a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \ * a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \ * a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \*/ /*! \brief 256 MAD instructions for float point test. * */ #define CZ_CALC_DFMAD_256(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ /*! \brief 16 MAD instructions for 32-bit integer test. * */ #define CZ_CALC_IMAD32_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for 32-bit integer test. 
* */ #define CZ_CALC_IMAD32_256(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ /*! \brief 16 MAD instructions for 64-bit integer test. * */ #define CZ_CALC_IMAD64_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for 64-bit integer test. * */ #define CZ_CALC_IMAD64_256(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ /*! \brief 16 MAD instructions for 24-bit integer test. * */ #define CZ_CALC_IMAD24_16(a, b) \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ /*! \brief 256 MAD instructions for 24-bit integer test. * */ #define CZ_CALC_IMAD24_256(a, b) \ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ /*! \brief 16 MAD instructions for 8-bit integer test. * */ #define CZ_CALC_IMAD8_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for 8-bit integer test. * */ #define CZ_CALC_IMAD8_256(a, b) \ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ #define CZ_CALC_MODE_FLOAT 0 /*!< Single-precision float point test mode. */ #define CZ_CALC_MODE_DOUBLE 1 /*!< Double-precision float point test mode. */ #define CZ_CALC_MODE_INTEGER8 2 /*!< 8-bit integer test mode. */ #define CZ_CALC_MODE_INTEGER32 3 /*!< 32-bit integer test mode. */ #define CZ_CALC_MODE_INTEGER24 4 /*!< 24-bit integer test mode. */ #define CZ_CALC_MODE_INTEGER64 5 /*!< 64-bit integer test mode. */ /*! \brief GPU code for float point test. 
* */ __global__ void CZCudaCalcKernelFloat( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; float *arr = (float*)buf; float val1 = index; float val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for double-precision test. * */ __global__ void CZCudaCalcKernelDouble( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; double *arr = (double*)buf; double val1 = index; double val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 8-bit integer test. * */ __global__ void CZCudaCalcKernelInteger8( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; char *arr = (char*)buf; int val1 = index; int val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 32-bit integer test. * */ __global__ void CZCudaCalcKernelInteger32( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; int *arr = (int*)buf; int val1 = index; int val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 24-bit integer test. * */ __global__ void CZCudaCalcKernelInteger24( void *buf /*!<[in] Data buffer. 
*/ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; int *arr = (int*)buf; int val1 = index; int val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 64-bit integer test. * */ __global__ void CZCudaCalcKernelInteger64( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; long long *arr = (long long*)buf; long long val1 = index; long long val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief Run GPU calculation performace tests. * \return \a 0 in case of success, \a -1 in case of error. * */ static float CZCudaCalcDevicePerformanceTest( struct CZDeviceInfo *info, /*!<[in,out] CUDA-device information. */ int mode /*!<[in] Run performance test in one of modes. */ ) { CZDeviceInfoBandLocalData *lData; float timeMs = 0.0; float performanceKOPs = 0.0; hipEvent_t start; hipEvent_t stop; int blocksNum = info->heavyMode? info->core.muliProcCount: 1; int i; if(info == NULL) return 0; CZ_CUDA_CALL(hipEventCreate(&start), return 0); CZ_CUDA_CALL(hipEventCreate(&stop), hipEventDestroy(start); return 0); if (info->band.localData == NULL) { lData = (CZDeviceInfoBandLocalData*)malloc(sizeof(CZDeviceInfoBandLocalData)); if(lData == NULL) { return 0; } memset(lData, 0, sizeof(CZDeviceInfoBandLocalData)); info->band.localData = lData; } if (lData->memDevice1 == NULL) { CZ_CUDA_CALL(hipMalloc((void**)&lData->memDevice1, CZ_COPY_BUF_SIZE), free(lData); return 0); } int threadsNum = info->core.maxThreadsPerBlock; if(threadsNum == 0) { int warpSize = info->core.SIMDWidth; if(warpSize == 0) warpSize = CZ_DEF_WARP_SIZE; threadsNum = warpSize * 2; if(threadsNum > CZ_DEF_THREADS_MAX) threadsNum = CZ_DEF_THREADS_MAX; } /* CZLog(CZLogLevelLow, "Starting %s test on %s on %d block(s) %d thread(s) each.", (mode == CZ_CALC_MODE_FLOAT)? "single-precision float": (mode == CZ_CALC_MODE_DOUBLE)? "double-precision float": (mode == CZ_CALC_MODE_INTEGER8)? "8-bit integer": (mode == CZ_CALC_MODE_INTEGER32)? "32-bit integer": (mode == CZ_CALC_MODE_INTEGER24)? "24-bit integer": (mode == CZ_CALC_MODE_INTEGER64)? 
"64-bit integer": "unknown", info->deviceName, blocksNum, threadsNum);*/ for(i = 0; i < CZ_CALC_LOOPS_NUM; i++) { float loopMs = 0.0; CZ_CUDA_CALL(hipEventRecord(start, 0), hipEventDestroy(start); hipEventDestroy(stop); return 0); switch(mode) { case CZ_CALC_MODE_FLOAT: hipLaunchKernelGGL(( CZCudaCalcKernelFloat), dim3(blocksNum), dim3(threadsNum), 0, 0, lData->memDevice1); break; case CZ_CALC_MODE_DOUBLE: hipLaunchKernelGGL(( CZCudaCalcKernelDouble), dim3(blocksNum), dim3(threadsNum), 0, 0, lData->memDevice1); break; case CZ_CALC_MODE_INTEGER8: hipLaunchKernelGGL(( CZCudaCalcKernelInteger8), dim3(blocksNum), dim3(threadsNum), 0, 0, lData->memDevice1); break; case CZ_CALC_MODE_INTEGER32: hipLaunchKernelGGL(( CZCudaCalcKernelInteger32), dim3(blocksNum), dim3(threadsNum), 0, 0, lData->memDevice1); break; case CZ_CALC_MODE_INTEGER24: hipLaunchKernelGGL(( CZCudaCalcKernelInteger24), dim3(blocksNum), dim3(threadsNum), 0, 0, lData->memDevice1); break; case CZ_CALC_MODE_INTEGER64: hipLaunchKernelGGL(( CZCudaCalcKernelInteger64), dim3(blocksNum), dim3(threadsNum), 0, 0, lData->memDevice1); break; default: // WTF! hipEventDestroy(start); hipEventDestroy(stop); return 0; } CZ_CUDA_CALL(hipGetLastError(), hipEventDestroy(start); hipEventDestroy(stop); return 0); CZ_CUDA_CALL(hipEventRecord(stop, 0), hipEventDestroy(start); hipEventDestroy(stop); return 0); CZ_CUDA_CALL(hipEventSynchronize(stop), hipEventDestroy(start); hipEventDestroy(stop); return 0); CZ_CUDA_CALL(hipEventElapsedTime(&loopMs, start, stop), hipEventDestroy(start); hipEventDestroy(stop); return 0); timeMs += loopMs; } //CZLog(CZLogLevelLow, "Test complete in %f ms.", timeMs); performanceKOPs = ( (float)info->core.muliProcCount * (float)CZ_CALC_LOOPS_NUM * (float)threadsNum * (float)CZ_CALC_BLOCK_LOOPS * (float)CZ_CALC_OPS_NUM * (float)CZ_CALC_BLOCK_SIZE * (float)CZ_CALC_BLOCK_NUM ) / (float)timeMs; hipEventDestroy(start); hipEventDestroy(stop); CZCudaCalcDeviceBandwidthFree(info); return performanceKOPs; } /*! \brief Calculate performance information about CUDA-device. * \return \a 0 in case of success, \a -1 in case of error. * */ int CZCudaCalcDevicePerformance( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; if (CZCudaCalcDeviceSelect(info) != 0) return -1; if(CZCudaCalcDevicePerformanceReset(info) != 0) return -1; info->perf.calcFloat = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_FLOAT); if(((info->major > 1)) || ((info->major == 1) && (info->minor >= 3))) info->perf.calcDouble = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_DOUBLE); //info->perf.calcInteger8 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER8); info->perf.calcInteger32 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER32); info->perf.calcInteger24 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER24); info->perf.calcInteger64 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER64); return 0; } /*! \brief Read information about a CUDA-device. \return \a 0 in case of success, \a -1 in case of error. */ int CZCudaReadDeviceInfo( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. 
*/ ) { hipDeviceProp_t prop; if(info == NULL) return -1; if(info->num >= CZCudaDeviceFound()) return -1; CZ_CUDA_CALL(hipGetDeviceProperties(&prop, info->num), return -1); strcpy(info->deviceName, prop.name); info->major = prop.major; info->minor = prop.minor; info->core.regsPerBlock = prop.regsPerBlock; info->core.regsPerMultipro = prop.regsPerMultiprocessor; info->core.SIMDWidth = prop.warpSize; info->core.maxThreadsPerBlock = prop.maxThreadsPerBlock; info->core.maxThreadsDim[0] = prop.maxThreadsDim[0]; info->core.maxThreadsDim[1] = prop.maxThreadsDim[1]; info->core.maxThreadsDim[2] = prop.maxThreadsDim[2]; info->core.maxGridSize[0] = prop.maxGridSize[0]; info->core.maxGridSize[1] = prop.maxGridSize[1]; info->core.maxGridSize[2] = prop.maxGridSize[2]; info->core.clockRate = prop.clockRate/1000; info->core.muliProcCount = prop.multiProcessorCount; info->core.kernelExecTimeoutEnabled= prop.kernelExecTimeoutEnabled; info->core.integratedGpu = prop.integrated; info->core.concurrentKernels = prop.concurrentKernels; info->core.computeMode = (prop.computeMode == hipComputeModeDefault)? CZComputeModeDefault: (prop.computeMode == hipComputeModeExclusive)? CZComputeModeExclusive: (prop.computeMode == hipComputeModeProhibited)? CZComputeModeProhibited: CZComputeModeUnknown; info->core.pciBusID = prop.pciBusID; info->core.pciDeviceID = prop.pciDeviceID; info->core.pciDomainID = prop.pciDomainID; info->core.maxThreadsPerMultiProcessor = prop.maxThreadsPerMultiProcessor; info->core.cudaCores = ConvertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount; info->core.streamPrioritiesSupported = prop.streamPrioritiesSupported; info->mem.totalGlobal = prop.totalGlobalMem; info->mem.sharedPerBlock = prop.sharedMemPerBlock; info->mem.sharedPerMultiProcessor = prop.sharedMemPerMultiprocessor; info->mem.maxPitch = prop.memPitch; info->mem.totalConst = prop.totalConstMem; info->mem.textureAlignment = prop.textureAlignment; info->mem.texture1D[0] = prop.maxTexture1D; info->mem.texture2D[0] = prop.maxTexture2D[0]; info->mem.texture2D[1] = prop.maxTexture2D[1]; info->mem.texture3D[0] = prop.maxTexture3D[0]; info->mem.texture3D[1] = prop.maxTexture3D[1]; info->mem.texture3D[2] = prop.maxTexture3D[2]; info->mem.gpuOverlap = prop.deviceOverlap; info->mem.mapHostMemory = prop.canMapHostMemory; info->mem.errorCorrection = prop.ECCEnabled; info->mem.asyncEngineCount = prop.asyncEngineCount; info->mem.unifiedAddressing = prop.unifiedAddressing; info->mem.memoryClockRate = prop.memoryClockRate/1000; info->mem.memoryBusWidth = prop.memoryBusWidth; info->mem.l2CacheSize = prop.l2CacheSize; return 0; } void printInfo(CZDeviceInfo *info) { char valueStr[30]; char valueStrPf[30]; printf("=====================================\n"); printf("DeviceName[%d]:%s\n", info->num, info->deviceName); printf("************Core Info*************\n"); printf("Compute Capability:%d.%d\n", info->major, info->minor); printf("Clock Rate:%d MHz\n", info->core.clockRate); printf("Multiprocessors:%d (%d Cores)\n", info->core.muliProcCount, info->core.cudaCores); printf("Cores Per Multiprocessor:%d\n", info->core.cudaCores/info->core.muliProcCount); printf("WarpSize:%d\n", info->core.SIMDWidth); printf("Max Threads Per Multiprocessor:%d\n", info->core.maxThreadsPerMultiProcessor); printf("Max Threads Per Block:%d\n", info->core.maxThreadsPerBlock); printf("Regs Per Block:%d\n", info->core.regsPerBlock); printf("maxThreadsDim:%dx%dx%d\n", info->core.maxThreadsDim[0], info->core.maxThreadsDim[1], info->core.maxThreadsDim[2]); 
printf("maxGridSize:%dx%dx%d\n", info->core.maxGridSize[0], info->core.maxGridSize[1], info->core.maxGridSize[2]); printf("computeMode:%d\n", info->core.computeMode); printf("kernelExecTimeoutEnabled:%d\n", info->core.kernelExecTimeoutEnabled); printf("integratedGpu:%d\n", info->core.integratedGpu); printf("concurrentKernels:%d\n", info->core.concurrentKernels); printf("streamPrioritiesSupported:%d\n", info->core.streamPrioritiesSupported); printf("pciBusID:%d\n", info->core.pciBusID); printf("pciDeviceID:%d\n", info->core.pciDeviceID); printf("pciDomainID:%d\n", info->core.pciDomainID); printf("************Memory Info*************\n"); getValue1024(info->mem.totalGlobal, valueStr); printf("totalGlobalMem:%s\n", valueStr); getValue1024(info->mem.totalConst, valueStr); printf("totalConstMem:%s\n", valueStr); getValue1024(info->mem.sharedPerBlock, valueStr); printf("sharedMemPerBlock:%s \n", valueStr); getValue1024(info->mem.sharedPerMultiProcessor, valueStr); printf("sharedMemPerMultiProcessor:%s\n", valueStr); getValue1024(info->mem.l2CacheSize, valueStr); printf("l2CacheSize:%s\n", valueStr); printf("memoryClockRate:%d MHz\n", info->mem.memoryClockRate); printf("memoryBusWidth:%d bits\n", info->mem.memoryBusWidth); getValue1024(info->mem.maxPitch, valueStr); printf("maxPitch:%s \n", valueStr); printf("textureAlignment:%d\n", info->mem.textureAlignment); printf("texture1D Size:%d\n", info->mem.texture1D[0]); printf("texture2D Size:%dx%d\n", info->mem.texture2D[0], info->mem.texture2D[1]); printf("texture3D Size:%dx%dx%d\n", info->mem.texture3D[0], info->mem.texture3D[1], info->mem.texture3D[2]); printf("errorCorrection:%d\n", info->mem.errorCorrection); printf("mapHostMemory:%d\n", info->mem.mapHostMemory); printf("unifiedAddressing:%d\n", info->mem.unifiedAddressing); printf("gpuOverlap:%d\n", info->mem.gpuOverlap); printf("asyncEngineCount:%d\n", info->mem.asyncEngineCount); printf("************Performace Info*************\n"); printf("MemoryCopy Pinned Pageable\n"); getValue1024(info->band.copyHDPin, valueStr, 1, "B/s"); getValue1024(info->band.copyHDPage, valueStrPf, 1, "B/s"); printf("HostToDevice %s %s\n", valueStr, valueStrPf); getValue1024(info->band.copyDHPin, valueStr, 1, "B/s");; getValue1024(info->band.copyDHPage, valueStrPf, 1, "B/s"); printf("DeviceToHost %s %s\n", valueStr, valueStrPf); getValue1024(info->band.copyDD, valueStr, 1, "B/s"); printf("DeviceToDevice %s\n", valueStr); printf("GPU Core Performace\n"); getValue1024(info->perf.calcFloat, valueStr, 1, "flop/s"); printf("Single-precision Float %s\n", valueStr); getValue1024(info->perf.calcDouble, valueStr, 1, "flop/s"); printf("Double-precision Float %s\n", valueStr); getValue1024(info->perf.calcInteger64, valueStr, 1, "iop/s"); printf("64-bit Integer %s\n", valueStr); getValue1024(info->perf.calcInteger32, valueStr, 1, "iop/s"); printf("32-bit Integer %s\n", valueStr); getValue1024(info->perf.calcInteger24, valueStr, 1, "iop/s"); printf("24-bit Integer %s\n", valueStr); }
35a7fde91412c40d57d02e6d1d915edc86c669cf.cu
#include <cuda.h> #include <cuda_runtime.h> #include <host_defines.h> #include <string.h> #include <stdio.h> #include "cudainfo.h" #if CUDA_VERSION < 5050 #error CUDA 1.x - 5.0 are not supported any more! Please use CUDA Toolkit 5.5+ instead. #endif #define CZ_COPY_BUF_SIZE (16 * (1 << 20)) /*!< Transfer buffer size. */ #define CZ_COPY_LOOPS_NUM 8 /*!< Number of loops to run transfer test to. */ #define CZ_CALC_BLOCK_LOOPS 16 /*!< Number of loops to run calculation loop. */ #define CZ_CALC_BLOCK_SIZE 256 /*!< Size of instruction block. */ #define CZ_CALC_BLOCK_NUM 16 /*!< Number of instruction blocks in loop. */ #define CZ_CALC_OPS_NUM 2 /*!< Number of operations per one loop. */ #define CZ_CALC_LOOPS_NUM 8 /*!< Number of loops to run performance test to. */ #define CZ_DEF_WARP_SIZE 32 /*!< Default warp size value. */ #define CZ_DEF_THREADS_MAX 512 /*!< Default max threads value value. */ #define CZ_VER_STR_LEN 256 /*!< Version string length. */ /*! \brief Error handling of CUDA RT calls. */ #define CZ_CUDA_CALL(funcCall, errProc) \ { \ cudaError_t errCode; \ if((errCode = (funcCall)) != cudaSuccess) { \ printf("CUDA Error: %08x %s", errCode, cudaGetErrorString(errCode)); \ errProc; \ } \ } /*! \brief Check how many CUDA-devices are present. \return number of CUDA-devices in case of success, \a 0 if no CUDA-devies were found. */ int CZCudaDeviceFound(void) { int count; CZ_CUDA_CALL(cudaGetDeviceCount(&count), return 0); return count; } /*! \def ConvertSMVer2Cores(major, minor) \brief Get number of CUDA cores per multiprocessor. \arg[in] major GPU Architecture major version. \arg[in] minor GPU Architecture minor version. \returns 0 if GPU Architecture is unknown, or number of CUDA cores per multiprocessor. */ #define ConvertSMVer2Cores(major, minor) \ (((major) == 1)? ( /* Tesla */ \ ((minor) == 0)? 8: /* G80*/ \ ((minor) == 1)? 8: /* G8x, G9x */ \ ((minor) == 2)? 8: /* GT21x */ \ ((minor) == 3)? 8: /* GT200 */ \ 0): \ ((major) == 2)? ( /* Fermi */ \ ((minor) == 0)? 32: /* GF100, GF110 */ \ ((minor) == 1)? 48: /* GF10x, FG11x */ \ 0): \ ((major) == 3)? ( /* Kepler */ \ ((minor) == 0)? 192: /* GK10x */ \ ((minor) == 2)? 192: /* Tegra K1 */ \ ((minor) == 5)? 192: /* GK11x, GK208 */ \ ((minor) == 7)? 192: /* GK210 */ \ 0): \ ((major) == 5)? ( /* Maxwell */ \ ((minor) == 0)? 128: /* GM10X */ \ ((minor) == 2)? 128: /* GM20X */ \ ((minor) == 3)? 128: /* Tegra X1 */ \ 0): \ 0) void getValue1024(double value, char *valueStr, int prefix = 0, const char *unit = "B") { const int prefixBase = 1024; int resPrefix = prefix; static const char *prefixTab[9] = { "", /* prefixNothing */ "K", /* prefixK */ "M", /* prefixM */ "G", /* prefixG */ "T", /* prefixT */ "P", /* prefixP */ "E", /* prefixE */ "Z", /* prefixZ */ "Y", /* prefixY */ }; while((value > prefixBase) && (resPrefix < 9)) { value /= prefixBase; resPrefix++; } sprintf(valueStr, "%.2f %s%s", value, prefixTab[resPrefix], unit); } /*! \brief Local service data structure for bandwith calulations. * */ struct CZDeviceInfoBandLocalData { void *memHostPage; /*!< Pageable host memory. */ void *memHostPin; /*!< Pinned host memory. */ void *memDevice1; /*!< Device memory buffer 1. */ void *memDevice2; /*!< Device memory buffer 2. */ }; /*! \brief Set device for current thread. * */ int CZCudaCalcDeviceSelect( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { CZ_CUDA_CALL(cudaSetDevice(info->num), return -1); return 0; } /*! \brief Allocate buffers for bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. 
* */ static int CZCudaCalcDeviceBandwidthAlloc( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { CZDeviceInfoBandLocalData *lData; if(info == NULL) return -1; if(info->band.localData == NULL) { //CZLog(CZLogLevelLow, "Alloc local buffers for %s.", info->deviceName); lData = (CZDeviceInfoBandLocalData*)malloc(sizeof(*lData)); if(lData == NULL) { return -1; } //CZLog(CZLogLevelLow, "Alloc host pageable for %s.", info->deviceName); lData->memHostPage = (void*)malloc(CZ_COPY_BUF_SIZE); if(lData->memHostPage == NULL) { free(lData); return -1; } //CZLog(CZLogLevelLow, "Host pageable is at 0x%08X.", lData->memHostPage); //CZLog(CZLogLevelLow, "Alloc host pinned for %s.", info->deviceName); CZ_CUDA_CALL(cudaMallocHost((void**)&lData->memHostPin, CZ_COPY_BUF_SIZE), free(lData->memHostPage); free(lData); return -1); //CZLog(CZLogLevelLow, "Host pinned is at 0x%08X.", lData->memHostPin); //CZLog(CZLogLevelLow, "Alloc device buffer 1 for %s.", info->deviceName); CZ_CUDA_CALL(cudaMalloc((void**)&lData->memDevice1, CZ_COPY_BUF_SIZE), cudaFreeHost(lData->memHostPin); free(lData->memHostPage); free(lData); return -1); //CZLog(CZLogLevelLow, "Device buffer 1 is at 0x%08X.", lData->memDevice1); //CZLog(CZLogLevelLow, "Alloc device buffer 2 for %s.", info->deviceName); CZ_CUDA_CALL(cudaMalloc((void**)&lData->memDevice2, CZ_COPY_BUF_SIZE), cudaFree(lData->memDevice1); cudaFreeHost(lData->memHostPin); free(lData->memHostPage); free(lData); return -1); //CZLog(CZLogLevelLow, "Device buffer 2 is at 0x%08X.", lData->memDevice2); info->band.localData = (void*)lData; } return 0; } /*! \brief Free buffers for bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthFree( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { CZDeviceInfoBandLocalData *lData; if(info == NULL) return -1; lData = (CZDeviceInfoBandLocalData*)info->band.localData; if(lData != NULL) { //CZLog(CZLogLevelLow, "Free host pageable for %s.", info->deviceName); if(lData->memHostPage != NULL) free(lData->memHostPage); //CZLog(CZLogLevelLow, "Free host pinned for %s.", info->deviceName); if(lData->memHostPin != NULL) cudaFreeHost(lData->memHostPin); //CZLog(CZLogLevelLow, "Free device buffer 1 for %s.", info->deviceName); if(lData->memDevice1 != NULL) cudaFree(lData->memDevice1); //CZLog(CZLogLevelLow, "Free device buffer 2 for %s.", info->deviceName); if(lData->memDevice2 != NULL) cudaFree(lData->memDevice2); //CZLog(CZLogLevelLow, "Free local buffers for %s.", info->deviceName); free(lData); } info->band.localData = NULL; return 0; } /*! \brief Reset results of bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthReset( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; info->band.copyHDPage = 0; info->band.copyHDPin = 0; info->band.copyDHPage = 0; info->band.copyDHPin = 0; info->band.copyDD = 0; return 0; } #define CZ_COPY_MODE_H2D 0 /*!< Host to device data copy mode. */ #define CZ_COPY_MODE_D2H 1 /*!< Device to host data copy mode. */ #define CZ_COPY_MODE_D2D 2 /*!< Device to device data copy mode. */ /*! \brief Run data transfer bandwidth tests. * \return \a 0 in case of success, \a other is value in KiB/s. * */ static float CZCudaCalcDeviceBandwidthTestCommon ( struct CZDeviceInfo *info, /*!<[in,out] CUDA-device information. */ int mode, /*!<[in] Run bandwidth test in one of modes. 
*/ int pinned /*!<[in] Use pinned \a (=1) memory buffer instead of pagable \a (=0). */ ) { CZDeviceInfoBandLocalData *lData; float timeMs = 0.0; float bandwidthKiBs = 0.0; cudaEvent_t start; cudaEvent_t stop; void *memHost; void *memDevice1; void *memDevice2; int i; if(info == NULL) return 0; CZ_CUDA_CALL(cudaEventCreate(&start), return 0); CZ_CUDA_CALL(cudaEventCreate(&stop), cudaEventDestroy(start); return 0); lData = (CZDeviceInfoBandLocalData*)info->band.localData; memHost = pinned? lData->memHostPin: lData->memHostPage; memDevice1 = lData->memDevice1; memDevice2 = lData->memDevice2; /*CZLog(CZLogLevelLow, "Starting %s test (%s) on %s.", (mode == CZ_COPY_MODE_H2D)? "host to device": (mode == CZ_COPY_MODE_D2H)? "device to host": (mode == CZ_COPY_MODE_D2D)? "device to device": "unknown", pinned? "pinned": "pageable", info->deviceName);*/ for(i = 0; i < CZ_COPY_LOOPS_NUM; i++) { float loopMs = 0.0; CZ_CUDA_CALL(cudaEventRecord(start, 0), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); switch(mode) { case CZ_COPY_MODE_H2D: CZ_CUDA_CALL(cudaMemcpy(memDevice1, memHost, CZ_COPY_BUF_SIZE, cudaMemcpyHostToDevice), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); break; case CZ_COPY_MODE_D2H: CZ_CUDA_CALL(cudaMemcpy(memHost, memDevice2, CZ_COPY_BUF_SIZE, cudaMemcpyDeviceToHost), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); break; case CZ_COPY_MODE_D2D: CZ_CUDA_CALL(cudaMemcpy(memDevice2, memDevice1, CZ_COPY_BUF_SIZE, cudaMemcpyDeviceToDevice), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); break; default: // WTF! cudaEventDestroy(start); cudaEventDestroy(stop); return 0; } CZ_CUDA_CALL(cudaEventRecord(stop, 0), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); CZ_CUDA_CALL(cudaEventSynchronize(stop), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); CZ_CUDA_CALL(cudaEventElapsedTime(&loopMs, start, stop), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); timeMs += loopMs; } //CZLog(CZLogLevelLow, "Test complete in %f ms.", timeMs); bandwidthKiBs = ( 1000 * (float)CZ_COPY_BUF_SIZE * (float)CZ_COPY_LOOPS_NUM ) / ( timeMs * (float)(1 << 10) ); cudaEventDestroy(start); cudaEventDestroy(stop); return bandwidthKiBs; } /*! \brief Run several bandwidth tests. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDeviceBandwidthTest( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { info->band.copyHDPage = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_H2D, 0); info->band.copyHDPin = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_H2D, 1); info->band.copyDHPage = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_D2H, 0); info->band.copyDHPin = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_D2H, 1); info->band.copyDD = CZCudaCalcDeviceBandwidthTestCommon(info, CZ_COPY_MODE_D2D, 0); return 0; } /*! \brief Calculate bandwidth information about CUDA-device. * \return \a 0 in case of success, \a -1 in case of error. * */ int CZCudaCalcDeviceBandwidth( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; if (CZCudaCalcDeviceSelect(info) != 0) return -1; if(CZCudaCalcDeviceBandwidthReset(info) != 0) return -1; if(CZCudaCalcDeviceBandwidthAlloc(info) != 0) return -1; if(CZCudaCalcDeviceBandwidthTest(info) != 0) return -1; // CZCudaCalcDeviceBandwidthFree(info); return 0; } /*! \brief Cleanup after test and bandwidth calculations. * \return \a 0 in case of success, \a -1 in case of error. 
* */ int CZCudaCleanDevice( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; if(CZCudaCalcDeviceBandwidthFree(info) != 0) return -1; return 0; } /*! \brief Reset results of preformance calculations. * \return \a 0 in case of success, \a -1 in case of error. * */ static int CZCudaCalcDevicePerformanceReset( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; info->perf.calcFloat = 0; info->perf.calcDouble = 0; info->perf.calcInteger32 = 0; info->perf.calcInteger24 = 0; info->perf.calcInteger64 = 0; return 0; } /*! \brief 16 MAD instructions for float point test. * */ #define CZ_CALC_FMAD_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for float point test. * */ #define CZ_CALC_FMAD_256(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ CZ_CALC_FMAD_16(a, b) CZ_CALC_FMAD_16(a, b) \ /*! \brief 16 DMAD instructions for double-precision test. * */ #define CZ_CALC_DFMAD_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /* a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \ * a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \ * a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \ * a = fma(a, a, a); b = fma(b, b, b); a = fma(a, a, a); b = fma(b, b, b); \*/ /*! \brief 256 MAD instructions for float point test. * */ #define CZ_CALC_DFMAD_256(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ CZ_CALC_DFMAD_16(a, b) CZ_CALC_DFMAD_16(a, b) \ /*! \brief 16 MAD instructions for 32-bit integer test. * */ #define CZ_CALC_IMAD32_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for 32-bit integer test. * */ #define CZ_CALC_IMAD32_256(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ CZ_CALC_IMAD32_16(a, b) CZ_CALC_IMAD32_16(a, b) \ /*! \brief 16 MAD instructions for 64-bit integer test. 
* */ #define CZ_CALC_IMAD64_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for 64-bit integer test. * */ #define CZ_CALC_IMAD64_256(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ CZ_CALC_IMAD64_16(a, b) CZ_CALC_IMAD64_16(a, b) \ /*! \brief 16 MAD instructions for 24-bit integer test. * */ #define CZ_CALC_IMAD24_16(a, b) \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ a = __mul24(a, a) + a; b = __mul24(b, b) + b; \ /*! \brief 256 MAD instructions for 24-bit integer test. * */ #define CZ_CALC_IMAD24_256(a, b) \ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ CZ_CALC_IMAD24_16(a, b) CZ_CALC_IMAD24_16(a, b)\ /*! \brief 16 MAD instructions for 8-bit integer test. * */ #define CZ_CALC_IMAD8_16(a, b) \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ a = a * a + a; b = b * b + b; a = a * a + a; b = b * b + b; \ /*! \brief 256 MAD instructions for 8-bit integer test. * */ #define CZ_CALC_IMAD8_256(a, b) \ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ CZ_CALC_IMAD8_16(a, b) CZ_CALC_IMAD8_16(a, b)\ #define CZ_CALC_MODE_FLOAT 0 /*!< Single-precision float point test mode. */ #define CZ_CALC_MODE_DOUBLE 1 /*!< Double-precision float point test mode. */ #define CZ_CALC_MODE_INTEGER8 2 /*!< 8-bit integer test mode. */ #define CZ_CALC_MODE_INTEGER32 3 /*!< 32-bit integer test mode. */ #define CZ_CALC_MODE_INTEGER24 4 /*!< 24-bit integer test mode. */ #define CZ_CALC_MODE_INTEGER64 5 /*!< 64-bit integer test mode. */ /*! \brief GPU code for float point test. * */ __global__ void CZCudaCalcKernelFloat( void *buf /*!<[in] Data buffer. 
*/ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; float *arr = (float*)buf; float val1 = index; float val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); CZ_CALC_FMAD_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for double-precision test. * */ __global__ void CZCudaCalcKernelDouble( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; double *arr = (double*)buf; double val1 = index; double val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); CZ_CALC_DFMAD_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 8-bit integer test. * */ __global__ void CZCudaCalcKernelInteger8( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; char *arr = (char*)buf; int val1 = index; int val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); CZ_CALC_IMAD8_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 32-bit integer test. * */ __global__ void CZCudaCalcKernelInteger32( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; int *arr = (int*)buf; int val1 = index; int val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); CZ_CALC_IMAD32_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 24-bit integer test. * */ __global__ void CZCudaCalcKernelInteger24( void *buf /*!<[in] Data buffer. 
*/ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; int *arr = (int*)buf; int val1 = index; int val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); CZ_CALC_IMAD24_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief GPU code for 64-bit integer test. * */ __global__ void CZCudaCalcKernelInteger64( void *buf /*!<[in] Data buffer. */ ) { int index = blockIdx.x * blockDim.x + threadIdx.x; long long *arr = (long long*)buf; long long val1 = index; long long val2 = arr[index]; int i; for(i = 0; i < CZ_CALC_BLOCK_LOOPS; i++) { CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); CZ_CALC_IMAD64_256(val1, val2); } arr[index] = val1 + val2; } /*! \brief Run GPU calculation performace tests. * \return \a 0 in case of success, \a -1 in case of error. * */ static float CZCudaCalcDevicePerformanceTest( struct CZDeviceInfo *info, /*!<[in,out] CUDA-device information. */ int mode /*!<[in] Run performance test in one of modes. */ ) { CZDeviceInfoBandLocalData *lData; float timeMs = 0.0; float performanceKOPs = 0.0; cudaEvent_t start; cudaEvent_t stop; int blocksNum = info->heavyMode? info->core.muliProcCount: 1; int i; if(info == NULL) return 0; CZ_CUDA_CALL(cudaEventCreate(&start), return 0); CZ_CUDA_CALL(cudaEventCreate(&stop), cudaEventDestroy(start); return 0); if (info->band.localData == NULL) { lData = (CZDeviceInfoBandLocalData*)malloc(sizeof(CZDeviceInfoBandLocalData)); if(lData == NULL) { return 0; } memset(lData, 0, sizeof(CZDeviceInfoBandLocalData)); info->band.localData = lData; } if (lData->memDevice1 == NULL) { CZ_CUDA_CALL(cudaMalloc((void**)&lData->memDevice1, CZ_COPY_BUF_SIZE), free(lData); return 0); } int threadsNum = info->core.maxThreadsPerBlock; if(threadsNum == 0) { int warpSize = info->core.SIMDWidth; if(warpSize == 0) warpSize = CZ_DEF_WARP_SIZE; threadsNum = warpSize * 2; if(threadsNum > CZ_DEF_THREADS_MAX) threadsNum = CZ_DEF_THREADS_MAX; } /* CZLog(CZLogLevelLow, "Starting %s test on %s on %d block(s) %d thread(s) each.", (mode == CZ_CALC_MODE_FLOAT)? "single-precision float": (mode == CZ_CALC_MODE_DOUBLE)? "double-precision float": (mode == CZ_CALC_MODE_INTEGER8)? "8-bit integer": (mode == CZ_CALC_MODE_INTEGER32)? "32-bit integer": (mode == CZ_CALC_MODE_INTEGER24)? "24-bit integer": (mode == CZ_CALC_MODE_INTEGER64)? 
"64-bit integer": "unknown", info->deviceName, blocksNum, threadsNum);*/ for(i = 0; i < CZ_CALC_LOOPS_NUM; i++) { float loopMs = 0.0; CZ_CUDA_CALL(cudaEventRecord(start, 0), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); switch(mode) { case CZ_CALC_MODE_FLOAT: CZCudaCalcKernelFloat<<<blocksNum, threadsNum>>>(lData->memDevice1); break; case CZ_CALC_MODE_DOUBLE: CZCudaCalcKernelDouble<<<blocksNum, threadsNum>>>(lData->memDevice1); break; case CZ_CALC_MODE_INTEGER8: CZCudaCalcKernelInteger8<<<blocksNum, threadsNum>>>(lData->memDevice1); break; case CZ_CALC_MODE_INTEGER32: CZCudaCalcKernelInteger32<<<blocksNum, threadsNum>>>(lData->memDevice1); break; case CZ_CALC_MODE_INTEGER24: CZCudaCalcKernelInteger24<<<blocksNum, threadsNum>>>(lData->memDevice1); break; case CZ_CALC_MODE_INTEGER64: CZCudaCalcKernelInteger64<<<blocksNum, threadsNum>>>(lData->memDevice1); break; default: // WTF! cudaEventDestroy(start); cudaEventDestroy(stop); return 0; } CZ_CUDA_CALL(cudaGetLastError(), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); CZ_CUDA_CALL(cudaEventRecord(stop, 0), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); CZ_CUDA_CALL(cudaEventSynchronize(stop), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); CZ_CUDA_CALL(cudaEventElapsedTime(&loopMs, start, stop), cudaEventDestroy(start); cudaEventDestroy(stop); return 0); timeMs += loopMs; } //CZLog(CZLogLevelLow, "Test complete in %f ms.", timeMs); performanceKOPs = ( (float)info->core.muliProcCount * (float)CZ_CALC_LOOPS_NUM * (float)threadsNum * (float)CZ_CALC_BLOCK_LOOPS * (float)CZ_CALC_OPS_NUM * (float)CZ_CALC_BLOCK_SIZE * (float)CZ_CALC_BLOCK_NUM ) / (float)timeMs; cudaEventDestroy(start); cudaEventDestroy(stop); CZCudaCalcDeviceBandwidthFree(info); return performanceKOPs; } /*! \brief Calculate performance information about CUDA-device. * \return \a 0 in case of success, \a -1 in case of error. * */ int CZCudaCalcDevicePerformance( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. */ ) { if(info == NULL) return -1; if (CZCudaCalcDeviceSelect(info) != 0) return -1; if(CZCudaCalcDevicePerformanceReset(info) != 0) return -1; info->perf.calcFloat = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_FLOAT); if(((info->major > 1)) || ((info->major == 1) && (info->minor >= 3))) info->perf.calcDouble = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_DOUBLE); //info->perf.calcInteger8 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER8); info->perf.calcInteger32 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER32); info->perf.calcInteger24 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER24); info->perf.calcInteger64 = CZCudaCalcDevicePerformanceTest(info, CZ_CALC_MODE_INTEGER64); return 0; } /*! \brief Read information about a CUDA-device. \return \a 0 in case of success, \a -1 in case of error. */ int CZCudaReadDeviceInfo( struct CZDeviceInfo *info /*!<[in,out] CUDA-device information. 
*/ ) { cudaDeviceProp prop; if(info == NULL) return -1; if(info->num >= CZCudaDeviceFound()) return -1; CZ_CUDA_CALL(cudaGetDeviceProperties(&prop, info->num), return -1); strcpy(info->deviceName, prop.name); info->major = prop.major; info->minor = prop.minor; info->core.regsPerBlock = prop.regsPerBlock; info->core.regsPerMultipro = prop.regsPerMultiprocessor; info->core.SIMDWidth = prop.warpSize; info->core.maxThreadsPerBlock = prop.maxThreadsPerBlock; info->core.maxThreadsDim[0] = prop.maxThreadsDim[0]; info->core.maxThreadsDim[1] = prop.maxThreadsDim[1]; info->core.maxThreadsDim[2] = prop.maxThreadsDim[2]; info->core.maxGridSize[0] = prop.maxGridSize[0]; info->core.maxGridSize[1] = prop.maxGridSize[1]; info->core.maxGridSize[2] = prop.maxGridSize[2]; info->core.clockRate = prop.clockRate/1000; info->core.muliProcCount = prop.multiProcessorCount; info->core.kernelExecTimeoutEnabled= prop.kernelExecTimeoutEnabled; info->core.integratedGpu = prop.integrated; info->core.concurrentKernels = prop.concurrentKernels; info->core.computeMode = (prop.computeMode == cudaComputeModeDefault)? CZComputeModeDefault: (prop.computeMode == cudaComputeModeExclusive)? CZComputeModeExclusive: (prop.computeMode == cudaComputeModeProhibited)? CZComputeModeProhibited: CZComputeModeUnknown; info->core.pciBusID = prop.pciBusID; info->core.pciDeviceID = prop.pciDeviceID; info->core.pciDomainID = prop.pciDomainID; info->core.maxThreadsPerMultiProcessor = prop.maxThreadsPerMultiProcessor; info->core.cudaCores = ConvertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount; info->core.streamPrioritiesSupported = prop.streamPrioritiesSupported; info->mem.totalGlobal = prop.totalGlobalMem; info->mem.sharedPerBlock = prop.sharedMemPerBlock; info->mem.sharedPerMultiProcessor = prop.sharedMemPerMultiprocessor; info->mem.maxPitch = prop.memPitch; info->mem.totalConst = prop.totalConstMem; info->mem.textureAlignment = prop.textureAlignment; info->mem.texture1D[0] = prop.maxTexture1D; info->mem.texture2D[0] = prop.maxTexture2D[0]; info->mem.texture2D[1] = prop.maxTexture2D[1]; info->mem.texture3D[0] = prop.maxTexture3D[0]; info->mem.texture3D[1] = prop.maxTexture3D[1]; info->mem.texture3D[2] = prop.maxTexture3D[2]; info->mem.gpuOverlap = prop.deviceOverlap; info->mem.mapHostMemory = prop.canMapHostMemory; info->mem.errorCorrection = prop.ECCEnabled; info->mem.asyncEngineCount = prop.asyncEngineCount; info->mem.unifiedAddressing = prop.unifiedAddressing; info->mem.memoryClockRate = prop.memoryClockRate/1000; info->mem.memoryBusWidth = prop.memoryBusWidth; info->mem.l2CacheSize = prop.l2CacheSize; return 0; } void printInfo(CZDeviceInfo *info) { char valueStr[30]; char valueStrPf[30]; printf("=====================================\n"); printf("DeviceName[%d]:%s\n", info->num, info->deviceName); printf("************Core Info*************\n"); printf("Compute Capability:%d.%d\n", info->major, info->minor); printf("Clock Rate:%d MHz\n", info->core.clockRate); printf("Multiprocessors:%d (%d Cores)\n", info->core.muliProcCount, info->core.cudaCores); printf("Cores Per Multiprocessor:%d\n", info->core.cudaCores/info->core.muliProcCount); printf("WarpSize:%d\n", info->core.SIMDWidth); printf("Max Threads Per Multiprocessor:%d\n", info->core.maxThreadsPerMultiProcessor); printf("Max Threads Per Block:%d\n", info->core.maxThreadsPerBlock); printf("Regs Per Block:%d\n", info->core.regsPerBlock); printf("maxThreadsDim:%dx%dx%d\n", info->core.maxThreadsDim[0], info->core.maxThreadsDim[1], info->core.maxThreadsDim[2]); 
printf("maxGridSize:%dx%dx%d\n", info->core.maxGridSize[0], info->core.maxGridSize[1], info->core.maxGridSize[2]); printf("computeMode:%d\n", info->core.computeMode); printf("kernelExecTimeoutEnabled:%d\n", info->core.kernelExecTimeoutEnabled); printf("integratedGpu:%d\n", info->core.integratedGpu); printf("concurrentKernels:%d\n", info->core.concurrentKernels); printf("streamPrioritiesSupported:%d\n", info->core.streamPrioritiesSupported); printf("pciBusID:%d\n", info->core.pciBusID); printf("pciDeviceID:%d\n", info->core.pciDeviceID); printf("pciDomainID:%d\n", info->core.pciDomainID); printf("************Memory Info*************\n"); getValue1024(info->mem.totalGlobal, valueStr); printf("totalGlobalMem:%s\n", valueStr); getValue1024(info->mem.totalConst, valueStr); printf("totalConstMem:%s\n", valueStr); getValue1024(info->mem.sharedPerBlock, valueStr); printf("sharedMemPerBlock:%s \n", valueStr); getValue1024(info->mem.sharedPerMultiProcessor, valueStr); printf("sharedMemPerMultiProcessor:%s\n", valueStr); getValue1024(info->mem.l2CacheSize, valueStr); printf("l2CacheSize:%s\n", valueStr); printf("memoryClockRate:%d MHz\n", info->mem.memoryClockRate); printf("memoryBusWidth:%d bits\n", info->mem.memoryBusWidth); getValue1024(info->mem.maxPitch, valueStr); printf("maxPitch:%s \n", valueStr); printf("textureAlignment:%d\n", info->mem.textureAlignment); printf("texture1D Size:%d\n", info->mem.texture1D[0]); printf("texture2D Size:%dx%d\n", info->mem.texture2D[0], info->mem.texture2D[1]); printf("texture3D Size:%dx%dx%d\n", info->mem.texture3D[0], info->mem.texture3D[1], info->mem.texture3D[2]); printf("errorCorrection:%d\n", info->mem.errorCorrection); printf("mapHostMemory:%d\n", info->mem.mapHostMemory); printf("unifiedAddressing:%d\n", info->mem.unifiedAddressing); printf("gpuOverlap:%d\n", info->mem.gpuOverlap); printf("asyncEngineCount:%d\n", info->mem.asyncEngineCount); printf("************Performace Info*************\n"); printf("MemoryCopy Pinned Pageable\n"); getValue1024(info->band.copyHDPin, valueStr, 1, "B/s"); getValue1024(info->band.copyHDPage, valueStrPf, 1, "B/s"); printf("HostToDevice %s %s\n", valueStr, valueStrPf); getValue1024(info->band.copyDHPin, valueStr, 1, "B/s");; getValue1024(info->band.copyDHPage, valueStrPf, 1, "B/s"); printf("DeviceToHost %s %s\n", valueStr, valueStrPf); getValue1024(info->band.copyDD, valueStr, 1, "B/s"); printf("DeviceToDevice %s\n", valueStr); printf("GPU Core Performace\n"); getValue1024(info->perf.calcFloat, valueStr, 1, "flop/s"); printf("Single-precision Float %s\n", valueStr); getValue1024(info->perf.calcDouble, valueStr, 1, "flop/s"); printf("Double-precision Float %s\n", valueStr); getValue1024(info->perf.calcInteger64, valueStr, 1, "iop/s"); printf("64-bit Integer %s\n", valueStr); getValue1024(info->perf.calcInteger32, valueStr, 1, "iop/s"); printf("32-bit Integer %s\n", valueStr); getValue1024(info->perf.calcInteger24, valueStr, 1, "iop/s"); printf("24-bit Integer %s\n", valueStr); }
9a9b6b61f1aab9b75f6c8094906adc69e22e3e0e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <chrono> #include <hip/hip_runtime.h> #include "kernel.h" int rounded_division(int number1, int number2) { if (number1 % number2 == 0) return number1 / number2; return number1 / number2 + 1; } template <typename T> void contract (const int max_N, const int max_C, const int repeat) { // tensor const size_t tensor_size = max_N * max_N * max_N * max_C * sizeof(T); const size_t tensor_size_byte = tensor_size * sizeof(T); T* tensor_value = (T*) malloc (tensor_size_byte); for (int i = 0; i < max_N * max_N * max_N * max_C; i++) tensor_value[i] = 1; T* device_tensor_value; hipMalloc(&device_tensor_value, tensor_size_byte); // adjacency matrix const size_t adj_size = max_N * max_N; const size_t adj_size_byte = adj_size * sizeof(T); // longest kernel time occurs when all values in adj_value are positive T* adj_value = (T*) malloc (adj_size_byte); for (size_t i = 0; i < adj_size; i++) adj_value[i] = 1; T* device_adj_value; hipMalloc((void**)&device_adj_value, adj_size_byte); // output value const size_t output_size = max_N * max_N * max_C * nContractions; const size_t output_size_byte = max_N * max_N * max_C * nContractions * sizeof(T); T* value = (T*) malloc (output_size_byte); T* device_value; hipMalloc((void**)&device_value, output_size_byte); // launch kernel hipMemcpy(device_tensor_value, tensor_value, tensor_size_byte, hipMemcpyHostToDevice); hipMemcpy(device_adj_value, adj_value, adj_size_byte, hipMemcpyHostToDevice); const int nThreads = 256; dim3 dimGrid(rounded_division(output_size, nThreads)); dim3 dimBlock(nThreads); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( contraction) , dim3(dimGrid), dim3(dimBlock) , 0, 0, device_tensor_value, device_adj_value, device_value, output_size, max_N, max_C); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / repeat); hipMemcpy(value, device_value, output_size_byte, hipMemcpyDeviceToHost); double checksum = 0; for (size_t i = 0; i < output_size; i++) checksum += value[i]; printf("Checksum: %lf min:%lf max:%lf\n", checksum, *std::min_element(value, value+output_size), *std::max_element(value, value+output_size)); hipFree(device_value); hipFree(device_tensor_value); hipFree(device_adj_value); free(value); free(tensor_value); free(adj_value); } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <dimension> <repeat>\n", argv[0]); return 1; } int max_N = atoi(argv[1]); int max_C = nContractions; int repeat = atoi(argv[2]); contract<float>(max_N, max_C, repeat); contract<double>(max_N, max_C, repeat); return 0; }
9a9b6b61f1aab9b75f6c8094906adc69e22e3e0e.cu
#include <stdio.h> #include <stdlib.h> #include <algorithm> #include <chrono> #include <cuda.h> #include "kernel.h" int rounded_division(int number1, int number2) { if (number1 % number2 == 0) return number1 / number2; return number1 / number2 + 1; } template <typename T> void contract (const int max_N, const int max_C, const int repeat) { // tensor const size_t tensor_size = max_N * max_N * max_N * max_C * sizeof(T); const size_t tensor_size_byte = tensor_size * sizeof(T); T* tensor_value = (T*) malloc (tensor_size_byte); for (int i = 0; i < max_N * max_N * max_N * max_C; i++) tensor_value[i] = 1; T* device_tensor_value; cudaMalloc(&device_tensor_value, tensor_size_byte); // adjacency matrix const size_t adj_size = max_N * max_N; const size_t adj_size_byte = adj_size * sizeof(T); // longest kernel time occurs when all values in adj_value are positive T* adj_value = (T*) malloc (adj_size_byte); for (size_t i = 0; i < adj_size; i++) adj_value[i] = 1; T* device_adj_value; cudaMalloc((void**)&device_adj_value, adj_size_byte); // output value const size_t output_size = max_N * max_N * max_C * nContractions; const size_t output_size_byte = max_N * max_N * max_C * nContractions * sizeof(T); T* value = (T*) malloc (output_size_byte); T* device_value; cudaMalloc((void**)&device_value, output_size_byte); // launch kernel cudaMemcpy(device_tensor_value, tensor_value, tensor_size_byte, cudaMemcpyHostToDevice); cudaMemcpy(device_adj_value, adj_value, adj_size_byte, cudaMemcpyHostToDevice); const int nThreads = 256; dim3 dimGrid(rounded_division(output_size, nThreads)); dim3 dimBlock(nThreads); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) contraction <<< dimGrid, dimBlock >>> ( device_tensor_value, device_adj_value, device_value, output_size, max_N, max_C); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / repeat); cudaMemcpy(value, device_value, output_size_byte, cudaMemcpyDeviceToHost); double checksum = 0; for (size_t i = 0; i < output_size; i++) checksum += value[i]; printf("Checksum: %lf min:%lf max:%lf\n", checksum, *std::min_element(value, value+output_size), *std::max_element(value, value+output_size)); cudaFree(device_value); cudaFree(device_tensor_value); cudaFree(device_adj_value); free(value); free(tensor_value); free(adj_value); } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: %s <dimension> <repeat>\n", argv[0]); return 1; } int max_N = atoi(argv[1]); int max_C = nContractions; int repeat = atoi(argv[2]); contract<float>(max_N, max_C, repeat); contract<double>(max_N, max_C, repeat); return 0; }
182b0b4987743e5e04b67bcfef087a63c932dd9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "qTensorLatticeModel.cuh" #include "../../inc/qTensorFunctions.h" //#incldue "qTensorfunctions.h" /*! \file qTensorLatticeModel.cu */ /*! \addtogroup modelKernels @{ */ __global__ void gpu_largestEigenvalue_kernel(dVec *Q,scalar *defects,int *t, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; if(t[idx]>0) return; scalar a,b,c; eigenvaluesOfQ(Q[idx],a,b,c); defects[idx] = max(max(a,b),c); return; } __global__ void gpu_computeDeterminant_kernel(dVec *Q,scalar *defects,int *t, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; if(t[idx]>0) return; defects[idx] = determinantOfQ(Q[idx]); return; } __global__ void gpu_degenerateEigenvalue_kernel(dVec *Q,scalar *defects,int *t, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; if(t[idx]>0) return; scalar trQ2 = TrQ2(Q[idx]); scalar det = determinantOfQ(Q[idx]); defects[idx] = trQ2*trQ2*trQ2 - 54.0*det*det; return; } __global__ void gpu_set_random_nematic_qTensors_kernel(dVec *pos, int *type, hiprandState_t *rngs,scalar amplitude, bool globallyAligned, scalar globalTheta, scalar globalPhi,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; hiprandState_t randState; randState = rngs[idx]; scalar theta = acos(2.0*hiprand_uniform(&randState)-1); scalar phi = 2.0*PI*hiprand_uniform(&randState); if(globallyAligned) { theta = globalTheta; phi = globalPhi; } scalar3 n; n.x = cos(phi)*sin(theta); n.y = sin(phi)*sin(theta); n.z = cos(theta); if(type[idx] <=0) qTensorFromDirector(n, amplitude, pos[idx]); rngs[idx] = randState; return; }; __global__ void gpu_update_qTensor_simple_kernel(dVec *d_disp, dVec *d_pos, scalar scale, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int pidx = idx/DIMENSION; if(pidx>=N) return; int didx = idx%DIMENSION; d_pos[pidx][didx] += scale*d_disp[pidx][didx]; /* scalar max = (didx >2 ) ? .5 : .834; scalar min = (didx >2 ) ? -.75 : -.667; if(d_pos[pidx][didx] > max) d_pos[pidx][didx] = max; if(d_pos[pidx][didx] < min) d_pos[pidx][didx] = min; */ return; }; __global__ void gpu_update_qTensor_simple_kernel(dVec *d_disp, dVec *d_pos, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int pidx = idx/DIMENSION; if(pidx>=N) return; int didx = idx%DIMENSION; d_pos[pidx][didx] += d_disp[pidx][didx]; /* scalar max = (didx >2 ) ? .5 : .834; scalar min = (didx >2 ) ? 
-.75 : -.667; if(d_pos[pidx][didx] > max) d_pos[pidx][didx] = max; if(d_pos[pidx][didx] < min) d_pos[pidx][didx] = min; */ return; }; bool gpu_update_qTensor(dVec *d_disp, dVec *Q, scalar scale, int N,int blockSize) { if (N < 128) blockSize = 16; unsigned int nBlocks = DIMENSION*N/blockSize + 1; if(DIMENSION <5) { printf("\nAttempting to initialize Q-tensors with incorrectly set dimension...change the root CMakeLists.txt file to have dimension 5 and recompile\n"); throw std::exception(); } hipLaunchKernelGGL(( gpu_update_qTensor_simple_kernel), dim3(nBlocks),dim3(blockSize), 0, 0, d_disp, Q,scale,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; bool gpu_update_qTensor(dVec *d_disp, dVec *Q, int N,int blockSize) { if (N < 128) blockSize = 16; unsigned int nBlocks = DIMENSION*N/blockSize + 1; if(DIMENSION <5) { printf("\nAttempting to initialize Q-tensors with incorrectly set dimension...change the root CMakeLists.txt file to have dimension 5 and recompile\n"); throw std::exception(); } hipLaunchKernelGGL(( gpu_update_qTensor_simple_kernel), dim3(nBlocks),dim3(blockSize), 0, 0, d_disp, Q,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; bool gpu_set_random_nematic_qTensors(dVec *d_pos, int *d_types, hiprandState_t *rngs, scalar amplitude, int blockSize, int nBlocks, bool globallyAligned, scalar theta, scalar phi, int N ) { if(DIMENSION <5) { printf("\nAttempting to initialize Q-tensors with incorrectly set dimension...change the root CMakeLists.txt file to have dimension 5 and recompile\n"); throw std::exception(); } hipLaunchKernelGGL(( gpu_set_random_nematic_qTensors_kernel), dim3(nBlocks),dim3(blockSize), 0, 0, d_pos,d_types, rngs,amplitude, globallyAligned, theta, phi,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; } bool gpu_get_qtensor_DefectMeasures(dVec *Q, scalar *defects, int *t, int defectType, int N) { //optimize block size later unsigned int block_size = 128; if (N < 128) block_size = 16; unsigned int nblocks = N/block_size + 1; if(defectType ==0) hipLaunchKernelGGL(( gpu_largestEigenvalue_kernel), dim3(nblocks),dim3(block_size), 0, 0, Q,defects,t,N); if(defectType ==1) hipLaunchKernelGGL(( gpu_computeDeterminant_kernel), dim3(nblocks),dim3(block_size), 0, 0, Q,defects,t,N); if(defectType ==2) hipLaunchKernelGGL(( gpu_degenerateEigenvalue_kernel), dim3(nblocks),dim3(block_size), 0, 0, Q,defects,t,N); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /** @} */ //end of group declaration
182b0b4987743e5e04b67bcfef087a63c932dd9f.cu
#include "qTensorLatticeModel.cuh" #include "../../inc/qTensorFunctions.h" //#incldue "qTensorfunctions.h" /*! \file qTensorLatticeModel.cu */ /*! \addtogroup modelKernels @{ */ __global__ void gpu_largestEigenvalue_kernel(dVec *Q,scalar *defects,int *t, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; if(t[idx]>0) return; scalar a,b,c; eigenvaluesOfQ(Q[idx],a,b,c); defects[idx] = max(max(a,b),c); return; } __global__ void gpu_computeDeterminant_kernel(dVec *Q,scalar *defects,int *t, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; if(t[idx]>0) return; defects[idx] = determinantOfQ(Q[idx]); return; } __global__ void gpu_degenerateEigenvalue_kernel(dVec *Q,scalar *defects,int *t, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; if(t[idx]>0) return; scalar trQ2 = TrQ2(Q[idx]); scalar det = determinantOfQ(Q[idx]); defects[idx] = trQ2*trQ2*trQ2 - 54.0*det*det; return; } __global__ void gpu_set_random_nematic_qTensors_kernel(dVec *pos, int *type, curandState *rngs,scalar amplitude, bool globallyAligned, scalar globalTheta, scalar globalPhi,int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; curandState randState; randState = rngs[idx]; scalar theta = acos(2.0*curand_uniform(&randState)-1); scalar phi = 2.0*PI*curand_uniform(&randState); if(globallyAligned) { theta = globalTheta; phi = globalPhi; } scalar3 n; n.x = cos(phi)*sin(theta); n.y = sin(phi)*sin(theta); n.z = cos(theta); if(type[idx] <=0) qTensorFromDirector(n, amplitude, pos[idx]); rngs[idx] = randState; return; }; __global__ void gpu_update_qTensor_simple_kernel(dVec *d_disp, dVec *d_pos, scalar scale, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int pidx = idx/DIMENSION; if(pidx>=N) return; int didx = idx%DIMENSION; d_pos[pidx][didx] += scale*d_disp[pidx][didx]; /* scalar max = (didx >2 ) ? .5 : .834; scalar min = (didx >2 ) ? -.75 : -.667; if(d_pos[pidx][didx] > max) d_pos[pidx][didx] = max; if(d_pos[pidx][didx] < min) d_pos[pidx][didx] = min; */ return; }; __global__ void gpu_update_qTensor_simple_kernel(dVec *d_disp, dVec *d_pos, int N) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int pidx = idx/DIMENSION; if(pidx>=N) return; int didx = idx%DIMENSION; d_pos[pidx][didx] += d_disp[pidx][didx]; /* scalar max = (didx >2 ) ? .5 : .834; scalar min = (didx >2 ) ? 
-.75 : -.667; if(d_pos[pidx][didx] > max) d_pos[pidx][didx] = max; if(d_pos[pidx][didx] < min) d_pos[pidx][didx] = min; */ return; }; bool gpu_update_qTensor(dVec *d_disp, dVec *Q, scalar scale, int N,int blockSize) { if (N < 128) blockSize = 16; unsigned int nBlocks = DIMENSION*N/blockSize + 1; if(DIMENSION <5) { printf("\nAttempting to initialize Q-tensors with incorrectly set dimension...change the root CMakeLists.txt file to have dimension 5 and recompile\n"); throw std::exception(); } gpu_update_qTensor_simple_kernel<<<nBlocks,blockSize>>>(d_disp, Q,scale,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; bool gpu_update_qTensor(dVec *d_disp, dVec *Q, int N,int blockSize) { if (N < 128) blockSize = 16; unsigned int nBlocks = DIMENSION*N/blockSize + 1; if(DIMENSION <5) { printf("\nAttempting to initialize Q-tensors with incorrectly set dimension...change the root CMakeLists.txt file to have dimension 5 and recompile\n"); throw std::exception(); } gpu_update_qTensor_simple_kernel<<<nBlocks,blockSize>>>(d_disp, Q,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; bool gpu_set_random_nematic_qTensors(dVec *d_pos, int *d_types, curandState *rngs, scalar amplitude, int blockSize, int nBlocks, bool globallyAligned, scalar theta, scalar phi, int N ) { if(DIMENSION <5) { printf("\nAttempting to initialize Q-tensors with incorrectly set dimension...change the root CMakeLists.txt file to have dimension 5 and recompile\n"); throw std::exception(); } gpu_set_random_nematic_qTensors_kernel<<<nBlocks,blockSize>>>(d_pos,d_types, rngs,amplitude, globallyAligned, theta, phi,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; } bool gpu_get_qtensor_DefectMeasures(dVec *Q, scalar *defects, int *t, int defectType, int N) { //optimize block size later unsigned int block_size = 128; if (N < 128) block_size = 16; unsigned int nblocks = N/block_size + 1; if(defectType ==0) gpu_largestEigenvalue_kernel<<<nblocks,block_size>>>(Q,defects,t,N); if(defectType ==1) gpu_computeDeterminant_kernel<<<nblocks,block_size>>>(Q,defects,t,N); if(defectType ==2) gpu_degenerateEigenvalue_kernel<<<nblocks,block_size>>>(Q,defects,t,N); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /** @} */ //end of group declaration
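The update kernels above flatten (lattice site, tensor component) into a single thread index; the sketch below is an editorial, standalone illustration of that decomposition using plain float storage and hypothetical names (add_components, DIM) in place of the project's dVec/DIMENSION types.

// Editorial sketch (not from the original file): the flat-index decomposition used by
// gpu_update_qTensor_simple_kernel, shown on a plain float array of N vectors with DIM
// components each. One thread updates one component; idx / DIM selects the vector and
// idx % DIM selects the component.
#include <cuda_runtime.h>

#define DIM 5   // stands in for DIMENSION in the original code

__global__ void add_components_kernel(const float *disp, float *pos, int N)
{
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int pidx = idx / DIM;                 // which vector (lattice site)
    int didx = idx % DIM;                 // which component of that vector
    if (pidx >= N)                        // tail guard, as in the original kernels
        return;
    pos[pidx * DIM + didx] += disp[pidx * DIM + didx];
}

// Matching launch, mirroring gpu_update_qTensor(): DIM*N threads rounded up to whole
// blocks, with the same small-N block-size heuristic.
void add_components(const float *d_disp, float *d_pos, int N, int blockSize)
{
    if (N < 128)
        blockSize = 16;
    unsigned int nBlocks = DIM * N / blockSize + 1;
    add_components_kernel<<<nBlocks, blockSize>>>(d_disp, d_pos, N);
}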
1488b414d4cc86459075e76378933b8536bb3278.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { /////////////////////////////////// MeanShiftfiltering /////////////////////////////////////////////// texture<uchar4, 2> tex_meanshift; __device__ short2 do_mean_shift(int x0, int y0, unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps) { int isr2 = sr*sr; uchar4 c = tex2D(tex_meanshift, x0, y0 ); // iterate meanshift procedure for( int iter = 0; iter < maxIter; iter++ ) { int count = 0; int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0; float icount; //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp) int minx = x0-sp; int miny = y0-sp; int maxx = x0+sp; int maxy = y0+sp; for( int y = miny; y <= maxy; y++) { int rowCount = 0; for( int x = minx; x <= maxx; x++ ) { uchar4 t = tex2D( tex_meanshift, x, y ); int norm2 = (t.x - c.x) * (t.x - c.x) + (t.y - c.y) * (t.y - c.y) + (t.z - c.z) * (t.z - c.z); if( norm2 <= isr2 ) { s0 += t.x; s1 += t.y; s2 += t.z; sx += x; rowCount++; } } count += rowCount; sy += y*rowCount; } if( count == 0 ) break; icount = 1.f/count; int x1 = __float2int_rz(sx*icount); int y1 = __float2int_rz(sy*icount); s0 = __float2int_rz(s0*icount); s1 = __float2int_rz(s1*icount); s2 = __float2int_rz(s2*icount); int norm2 = (s0 - c.x) * (s0 - c.x) + (s1 - c.y) * (s1 - c.y) + (s2 - c.z) * (s2 - c.z); bool stopFlag = (x0 == x1 && y0 == y1) || (::abs(x1-x0) + ::abs(y1-y0) + norm2 <= eps); x0 = x1; y0 = y1; c.x = s0; c.y = s1; c.z = s2; if( stopFlag ) break; } int base = (blockIdx.y * blockDim.y + threadIdx.y) * out_step + (blockIdx.x * blockDim.x + threadIdx.x) * 4 * sizeof(uchar); *(uchar4*)(out + base) = c; return make_short2((short)x0, (short)y0); } __global__ void meanshift_kernel(unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps ) { int x0 = blockIdx.x * blockDim.x + threadIdx.x; int y0 = blockIdx.y * blockDim.y + threadIdx.y; if( x0 < cols && y0 < rows ) do_mean_shift(x0, y0, out, out_step, cols, rows, sp, sr, maxIter, eps); } __global__ void meanshiftproc_kernel(unsigned char* outr, size_t outrstep, unsigned char* outsp, size_t outspstep, int cols, int rows, int sp, int sr, int maxIter, float eps) { int x0 = blockIdx.x * blockDim.x + threadIdx.x; int y0 = blockIdx.y * blockDim.y + threadIdx.y; if( x0 < cols && y0 < rows ) { int basesp = (blockIdx.y * blockDim.y + threadIdx.y) * outspstep + (blockIdx.x * blockDim.x + threadIdx.x) * 2 * sizeof(short); *(short2*)(outsp + basesp) = do_mean_shift(x0, y0, outr, outrstep, cols, rows, sp, sr, maxIter, eps); } } void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, hipStream_t stream) { dim3 grid(1, 1, 1); dim3 threads(32, 8, 1); grid.x = divUp(src.cols, threads.x); grid.y = divUp(src.rows, threads.y); hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>(); cudaSafeCall( hipBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) ); hipLaunchKernelGGL(( meanshift_kernel), dim3(grid), dim3(threads), 0, stream , dst.data, dst.step, dst.cols, dst.rows, sp, sr, maxIter, eps ); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); //cudaSafeCall( hipUnbindTexture( tex_meanshift ) ); } void 
meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, hipStream_t stream) { dim3 grid(1, 1, 1); dim3 threads(32, 8, 1); grid.x = divUp(src.cols, threads.x); grid.y = divUp(src.rows, threads.y); hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>(); cudaSafeCall( hipBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) ); hipLaunchKernelGGL(( meanshiftproc_kernel), dim3(grid), dim3(threads), 0, stream , dstr.data, dstr.step, dstsp.data, dstsp.step, dstr.cols, dstr.rows, sp, sr, maxIter, eps ); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); //cudaSafeCall( hipUnbindTexture( tex_meanshift ) ); } /////////////////////////////////// drawColorDisp /////////////////////////////////////////////// template <typename T> __device__ unsigned int cvtPixel(T d, int ndisp, float S = 1, float V = 1) { unsigned int H = ((ndisp-d) * 240)/ndisp; unsigned int hi = (H/60) % 6; float f = H/60.f - H/60; float p = V * (1 - S); float q = V * (1 - f * S); float t = V * (1 - (1 - f) * S); float3 res; if (hi == 0) //R = V, G = t, B = p { res.x = p; res.y = t; res.z = V; } if (hi == 1) // R = q, G = V, B = p { res.x = p; res.y = V; res.z = q; } if (hi == 2) // R = p, G = V, B = t { res.x = t; res.y = V; res.z = p; } if (hi == 3) // R = p, G = q, B = V { res.x = V; res.y = q; res.z = p; } if (hi == 4) // R = t, G = p, B = V { res.x = V; res.y = p; res.z = t; } if (hi == 5) // R = V, G = p, B = q { res.x = q; res.y = p; res.z = V; } const unsigned int b = (unsigned int)(::max(0.f, ::min(res.x, 1.f)) * 255.f); const unsigned int g = (unsigned int)(::max(0.f, ::min(res.y, 1.f)) * 255.f); const unsigned int r = (unsigned int)(::max(0.f, ::min(res.z, 1.f)) * 255.f); const unsigned int a = 255U; return (a << 24) + (r << 16) + (g << 8) + b; } __global__ void drawColorDisp(uchar* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp) { const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 2; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { uchar4 d4 = *(uchar4*)(disp + y * disp_step + x); uint4 res; res.x = cvtPixel(d4.x, ndisp); res.y = cvtPixel(d4.y, ndisp); res.z = cvtPixel(d4.z, ndisp); res.w = cvtPixel(d4.w, ndisp); uint4* line = (uint4*)(out_image + y * out_step); line[x >> 2] = res; } } __global__ void drawColorDisp(short* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp) { const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { short2 d2 = *(short2*)(disp + y * disp_step + x); uint2 res; res.x = cvtPixel(d2.x, ndisp); res.y = cvtPixel(d2.y, ndisp); uint2* line = (uint2*)(out_image + y * out_step); line[x >> 1] = res; } } void drawColorDisp_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int ndisp, const hipStream_t& stream) { dim3 threads(16, 16, 1); dim3 grid(1, 1, 1); grid.x = divUp(src.cols, threads.x << 2); grid.y = divUp(src.rows, threads.y); hipLaunchKernelGGL(( drawColorDisp), dim3(grid), dim3(threads), 0, stream, src.data, src.step, dst.data, dst.step, src.cols, src.rows, ndisp); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } void drawColorDisp_gpu(const PtrStepSz<short>& src, const PtrStepSzb& dst, int ndisp, const hipStream_t& stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(src.cols, threads.x << 1); 
grid.y = divUp(src.rows, threads.y); hipLaunchKernelGGL(( drawColorDisp), dim3(grid), dim3(threads), 0, stream, src.data, src.step / sizeof(short), dst.data, dst.step, src.cols, src.rows, ndisp); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////// reprojectImageTo3D /////////////////////////////////////////////// __constant__ float cq[16]; template <typename T, typename D> __global__ void reprojectImageTo3D(const PtrStepSz<T> disp, PtrStep<D> xyz) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= disp.rows || x >= disp.cols) return; const float qx = x * cq[ 0] + y * cq[ 1] + cq[ 3]; const float qy = x * cq[ 4] + y * cq[ 5] + cq[ 7]; const float qz = x * cq[ 8] + y * cq[ 9] + cq[11]; const float qw = x * cq[12] + y * cq[13] + cq[15]; const T d = disp(y, x); const float iW = 1.f / (qw + cq[14] * d); D v = VecTraits<D>::all(1.0f); v.x = (qx + cq[2] * d) * iW; v.y = (qy + cq[6] * d) * iW; v.z = (qz + cq[10] * d) * iW; xyz(y, x) = v; } template <typename T, typename D> void reprojectImageTo3D_gpu(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(disp.cols, block.x), divUp(disp.rows, block.y)); cudaSafeCall( hipMemcpyToSymbol(cq, q, 16 * sizeof(float)) ); hipLaunchKernelGGL(( reprojectImageTo3D<T, D>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<T>)disp, (PtrStepSz<D>)xyz); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void reprojectImageTo3D_gpu<uchar, float3>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, hipStream_t stream); template void reprojectImageTo3D_gpu<uchar, float4>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, hipStream_t stream); template void reprojectImageTo3D_gpu<short, float3>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, hipStream_t stream); template void reprojectImageTo3D_gpu<short, float4>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, hipStream_t stream); /////////////////////////////////////////// Corner Harris ///////////////////////////////////////////////// texture<float, hipTextureType2D, hipReadModeElementType> harrisDxTex(0, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> harrisDyTex(0, hipFilterModePoint, hipAddressModeClamp); __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(harrisDxTex, j, i); float dy = tex2D(harrisDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } template <typename BR, typename BC> __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - 
(block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { const int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { const int x = border_row.idx_col(j); float dx = tex2D(harrisDxTex, x, y); float dy = tex2D(harrisDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&harrisDxTex, Dx); bindTexture(&harrisDyTex, Dy); switch (border_type) { case BORDER_REFLECT101_GPU: hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT_GPU: hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE_GPU: hipLaunchKernelGGL(( cornerHarris_kernel), dim3(grid), dim3(block), 0, stream, block_size, k, dst); break; } cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } /////////////////////////////////////////// Corner Min Eigen Val ///////////////////////////////////////////////// texture<float, hipTextureType2D, hipReadModeElementType> minEigenValDxTex(0, hipFilterModePoint, hipAddressModeClamp); texture<float, hipTextureType2D, hipReadModeElementType> minEigenValDyTex(0, hipFilterModePoint, hipAddressModeClamp); __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(minEigenValDxTex, j, i); float dy = tex2D(minEigenValDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } template <typename BR, typename BC> __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { int x = border_row.idx_col(j); float dx = tex2D(minEigenValDxTex, x, y); float dy = tex2D(minEigenValDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&minEigenValDxTex, Dx); bindTexture(&minEigenValDyTex, Dy); switch 
(border_type) { case BORDER_REFLECT101_GPU: hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT_GPU: hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE_GPU: hipLaunchKernelGGL(( cornerMinEigenVal_kernel), dim3(grid), dim3(block), 0, stream, block_size, dst); break; } cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } ////////////////////////////// Column Sum ////////////////////////////////////// __global__ void column_sumKernel_32F(int cols, int rows, const PtrStepb src, const PtrStepb dst) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < cols) { const unsigned char* src_data = src.data + x * sizeof(float); unsigned char* dst_data = dst.data + x * sizeof(float); float sum = 0.f; for (int y = 0; y < rows; ++y) { sum += *(const float*)src_data; *(float*)dst_data = sum; src_data += src.step; dst_data += dst.step; } } } void columnSum_32F(const PtrStepSzb src, const PtrStepSzb dst) { dim3 threads(256); dim3 grid(divUp(src.cols, threads.x)); hipLaunchKernelGGL(( column_sumKernel_32F), dim3(grid), dim3(threads), 0, 0, src.cols, src.rows, src, dst); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulSpectrums __global__ void mulSpectrumsKernel(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); } } void mulSpectrums(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulSpectrumsKernel), dim3(grid), dim3(threads), 0, stream, a, b, c); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulSpectrums_CONJ __global__ void mulSpectrumsKernel_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); } } void mulSpectrums_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulSpectrumsKernel_CONJ), dim3(grid), dim3(threads), 0, stream, a, b, c); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums __global__ void mulAndScaleSpectrumsKernel(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { hipfftComplex v = cuCmulf(a.ptr(y)[x], 
b.ptr(y)[x]); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulAndScaleSpectrumsKernel), dim3(grid), dim3(threads), 0, stream, a, b, scale, c); cudaSafeCall( hipGetLastError() ); if (stream) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums_CONJ __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { hipfftComplex v = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulAndScaleSpectrumsKernel_CONJ), dim3(grid), dim3(threads), 0, stream, a, b, scale, c); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // buildWarpMaps // TODO use intrinsics like __sinf and so on namespace build_warp_maps { __constant__ float ck_rinv[9]; __constant__ float cr_kinv[9]; __constant__ float ct[3]; __constant__ float cscale; } class PlaneMapper { public: static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y) { using namespace build_warp_maps; float x_ = u / cscale - ct[0]; float y_ = v / cscale - ct[1]; float z; x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]); y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]); z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]); x /= z; y /= z; } }; class CylindricalMapper { public: static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y) { using namespace build_warp_maps; u /= cscale; float x_ = ::sinf(u); float y_ = v / cscale; float z_ = ::cosf(u); float z; x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_; y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_; z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_; if (z > 0) { x /= z; y /= z; } else x = y = -1; } }; class SphericalMapper { public: static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y) { using namespace build_warp_maps; v /= cscale; u /= cscale; float sinv = ::sinf(v); float x_ = sinv * ::sinf(u); float y_ = -::cosf(v); float z_ = sinv * ::cosf(u); float z; x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_; y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_; z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_; if (z > 0) { x /= z; y /= z; } else x = y = -1; } }; template <typename Mapper> __global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows, PtrStepf map_x, PtrStepf map_y) { int du = blockIdx.x * blockDim.x + threadIdx.x; int dv = blockIdx.y * blockDim.y + threadIdx.y; if (du < cols && dv < rows) { float u = tl_u + du; float v = tl_v + dv; float x, y; 
Mapper::mapBackward(u, v, x, y); map_x.ptr(dv)[du] = x; map_y.ptr(dv)[du] = y; } } void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y, const float k_rinv[9], const float r_kinv[9], const float t[3], float scale, hipStream_t stream) { cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float))); int cols = map_x.cols; int rows = map_x.rows; dim3 threads(32, 8); dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y)); hipLaunchKernelGGL(( buildWarpMapsKernel<PlaneMapper>), dim3(grid),dim3(threads), 0, 0, tl_u, tl_v, cols, rows, map_x, map_y); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y, const float k_rinv[9], const float r_kinv[9], float scale, hipStream_t stream) { cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float))); int cols = map_x.cols; int rows = map_x.rows; dim3 threads(32, 8); dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y)); hipLaunchKernelGGL(( buildWarpMapsKernel<CylindricalMapper>), dim3(grid),dim3(threads), 0, 0, tl_u, tl_v, cols, rows, map_x, map_y); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y, const float k_rinv[9], const float r_kinv[9], float scale, hipStream_t stream) { cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float))); cudaSafeCall(hipMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float))); int cols = map_x.cols; int rows = map_x.rows; dim3 threads(32, 8); dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y)); hipLaunchKernelGGL(( buildWarpMapsKernel<SphericalMapper>), dim3(grid),dim3(threads), 0, 0, tl_u, tl_v, cols, rows, map_x, map_y); cudaSafeCall(hipGetLastError()); if (stream == 0) cudaSafeCall(hipDeviceSynchronize()); } ////////////////////////////////////////////////////////////////////////// // filter2D #define FILTER2D_MAX_KERNEL_SIZE 16 __constant__ float c_filter2DKernel[FILTER2D_MAX_KERNEL_SIZE * FILTER2D_MAX_KERNEL_SIZE]; template <class SrcT, typename D> __global__ void filter2D(const SrcT src, PtrStepSz<D> dst, const int kWidth, const int kHeight, const int anchorX, const int anchorY) { typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; sum_t res = VecTraits<sum_t>::all(0); int kInd = 0; for (int i = 0; i < kHeight; ++i) { for (int j = 0; j < kWidth; ++j) res = res + src(y - anchorY + i, x - anchorX + j) * c_filter2DKernel[kInd++]; } dst(y, x) = saturate_cast<D>(res); } template <typename T, typename D, template <typename> class Brd> struct Filter2DCaller; #define IMPLEMENT_FILTER2D_TEX_READER(type) \ texture< type , hipTextureType2D, hipReadModeElementType> tex_filter2D_ ## type (0, hipFilterModePoint, hipAddressModeClamp); \ 
struct tex_filter2D_ ## type ## _reader \ { \ typedef type elem_type; \ typedef int index_type; \ const int xoff; \ const int yoff; \ tex_filter2D_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \ __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \ { \ return tex2D(tex_filter2D_ ## type , x + xoff, y + yoff); \ } \ }; \ template <typename D, template <typename> class Brd> struct Filter2DCaller< type , D, Brd> \ { \ static void call(const PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz<D> dst, \ int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, hipStream_t stream) \ { \ typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \ dim3 block(16, 16); \ dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \ bindTexture(&tex_filter2D_ ## type , srcWhole); \ tex_filter2D_ ## type ##_reader texSrc(xoff, yoff); \ Brd<work_type> brd(dst.rows, dst.cols, VecTraits<work_type>::make(borderValue)); \ BorderReader< tex_filter2D_ ## type ##_reader, Brd<work_type> > brdSrc(texSrc, brd); \ hipLaunchKernelGGL(( filter2D), dim3(grid), dim3(block), 0, stream, brdSrc, dst, kWidth, kHeight, anchorX, anchorY); \ cudaSafeCall( hipGetLastError() ); \ if (stream == 0) \ cudaSafeCall( hipDeviceSynchronize() ); \ } \ }; IMPLEMENT_FILTER2D_TEX_READER(uchar); IMPLEMENT_FILTER2D_TEX_READER(uchar4); IMPLEMENT_FILTER2D_TEX_READER(ushort); IMPLEMENT_FILTER2D_TEX_READER(ushort4); IMPLEMENT_FILTER2D_TEX_READER(float); IMPLEMENT_FILTER2D_TEX_READER(float4); #undef IMPLEMENT_FILTER2D_TEX_READER template <typename T, typename D> void filter2D_gpu(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream) { typedef void (*func_t)(const PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, hipStream_t stream); static const func_t funcs[] = { Filter2DCaller<T, D, BrdReflect101>::call, Filter2DCaller<T, D, BrdReplicate>::call, Filter2DCaller<T, D, BrdConstant>::call, Filter2DCaller<T, D, BrdReflect>::call, Filter2DCaller<T, D, BrdWrap>::call }; cudaSafeCall(hipMemcpyToSymbol(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, hipMemcpyDeviceToDevice) ); funcs[borderMode](static_cast< PtrStepSz<T> >(srcWhole), ofsX, ofsY, static_cast< PtrStepSz<D> >(dst), kWidth, kHeight, anchorX, anchorY, borderValue, stream); } template void filter2D_gpu<uchar, uchar>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream); template void filter2D_gpu<uchar4, uchar4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream); template void filter2D_gpu<ushort, ushort>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream); template void filter2D_gpu<ushort4, ushort4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream); template void filter2D_gpu<float, float>(PtrStepSzb 
srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream); template void filter2D_gpu<float4, float4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device { #endif /* CUDA_DISABLER */
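// A minimal, self-contained sketch of the launch pattern used throughout the hipified
// file above: hipify keeps the kernels intact and rewrites each triple-chevron launch
// into hipLaunchKernelGGL with a divUp-sized grid. scaleKernel and divUpLocal are
// illustrative stand-ins, not OpenCV symbols.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

static int divUpLocal(int total, int grain) { return (total + grain - 1) / grain; }

__global__ void scaleKernel(float* data, int cols, int rows, float s)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < cols && y < rows)          // same bounds-check-then-write shape as the kernels above
        data[y * cols + x] *= s;
}

int main()
{
    const int cols = 640, rows = 480;
    std::vector<float> h(cols * rows, 1.f);
    float* d = 0;
    hipMalloc((void**)&d, cols * rows * sizeof(float));
    hipMemcpy(d, h.data(), cols * rows * sizeof(float), hipMemcpyHostToDevice);

    dim3 block(32, 8);                 // the block shape most kernels above use
    dim3 grid(divUpLocal(cols, block.x), divUpLocal(rows, block.y));

    // hipify turns `scaleKernel<<<grid, block, 0, stream>>>(...)` into this call:
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d, cols, rows, 2.f);
    hipDeviceSynchronize();

    hipMemcpy(h.data(), d, cols * rows * sizeof(float), hipMemcpyDeviceToHost);
    printf("h[0] = %f\n", h[0]);       // expect 2.0
    hipFree(d);
    return 0;
}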
1488b414d4cc86459075e76378933b8536bb3278.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { /////////////////////////////////// MeanShiftfiltering /////////////////////////////////////////////// texture<uchar4, 2> tex_meanshift; __device__ short2 do_mean_shift(int x0, int y0, unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps) { int isr2 = sr*sr; uchar4 c = tex2D(tex_meanshift, x0, y0 ); // iterate meanshift procedure for( int iter = 0; iter < maxIter; iter++ ) { int count = 0; int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0; float icount; //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp) int minx = x0-sp; int miny = y0-sp; int maxx = x0+sp; int maxy = y0+sp; for( int y = miny; y <= maxy; y++) { int rowCount = 0; for( int x = minx; x <= maxx; x++ ) { uchar4 t = tex2D( tex_meanshift, x, y ); int norm2 = (t.x - c.x) * (t.x - c.x) + (t.y - c.y) * (t.y - c.y) + (t.z - c.z) * (t.z - c.z); if( norm2 <= isr2 ) { s0 += t.x; s1 += t.y; s2 += t.z; sx += x; rowCount++; } } count += rowCount; sy += y*rowCount; } if( count == 0 ) break; icount = 1.f/count; int x1 = __float2int_rz(sx*icount); int y1 = __float2int_rz(sy*icount); s0 = __float2int_rz(s0*icount); s1 = __float2int_rz(s1*icount); s2 = __float2int_rz(s2*icount); int norm2 = (s0 - c.x) * (s0 - c.x) + (s1 - c.y) * (s1 - c.y) + (s2 - c.z) * (s2 - c.z); bool stopFlag = (x0 == x1 && y0 == y1) || (::abs(x1-x0) + ::abs(y1-y0) + norm2 <= eps); x0 = x1; y0 = y1; c.x = s0; c.y = s1; c.z = s2; if( stopFlag ) break; } int base = (blockIdx.y * blockDim.y + threadIdx.y) * out_step + (blockIdx.x * blockDim.x + threadIdx.x) * 4 * sizeof(uchar); *(uchar4*)(out + base) = c; return make_short2((short)x0, (short)y0); } __global__ void meanshift_kernel(unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps ) { int x0 = blockIdx.x * blockDim.x + threadIdx.x; int y0 = blockIdx.y * blockDim.y + threadIdx.y; if( x0 < cols && y0 < rows ) do_mean_shift(x0, y0, out, out_step, cols, rows, sp, sr, maxIter, eps); } __global__ void meanshiftproc_kernel(unsigned char* outr, size_t outrstep, unsigned char* outsp, size_t outspstep, int cols, int rows, int sp, int sr, int maxIter, float eps) { int x0 = blockIdx.x * blockDim.x + threadIdx.x; int y0 = blockIdx.y * blockDim.y + threadIdx.y; if( x0 < cols && y0 < rows ) { int basesp = (blockIdx.y * blockDim.y + threadIdx.y) * outspstep + (blockIdx.x * blockDim.x + threadIdx.x) * 2 * sizeof(short); *(short2*)(outsp + basesp) = do_mean_shift(x0, y0, outr, outrstep, cols, rows, sp, sr, maxIter, eps); } } void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream) { dim3 grid(1, 1, 1); dim3 threads(32, 8, 1); grid.x = divUp(src.cols, threads.x); grid.y = divUp(src.rows, threads.y); cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) ); meanshift_kernel<<< grid, threads, 0, stream >>>( dst.data, dst.step, dst.cols, dst.rows, sp, sr, maxIter, eps ); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); //cudaSafeCall( cudaUnbindTexture( tex_meanshift ) ); } void meanShiftProc_gpu(const PtrStepSzb& src, 
PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream) { dim3 grid(1, 1, 1); dim3 threads(32, 8, 1); grid.x = divUp(src.cols, threads.x); grid.y = divUp(src.rows, threads.y); cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>(); cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) ); meanshiftproc_kernel<<< grid, threads, 0, stream >>>( dstr.data, dstr.step, dstsp.data, dstsp.step, dstr.cols, dstr.rows, sp, sr, maxIter, eps ); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); //cudaSafeCall( cudaUnbindTexture( tex_meanshift ) ); } /////////////////////////////////// drawColorDisp /////////////////////////////////////////////// template <typename T> __device__ unsigned int cvtPixel(T d, int ndisp, float S = 1, float V = 1) { unsigned int H = ((ndisp-d) * 240)/ndisp; unsigned int hi = (H/60) % 6; float f = H/60.f - H/60; float p = V * (1 - S); float q = V * (1 - f * S); float t = V * (1 - (1 - f) * S); float3 res; if (hi == 0) //R = V, G = t, B = p { res.x = p; res.y = t; res.z = V; } if (hi == 1) // R = q, G = V, B = p { res.x = p; res.y = V; res.z = q; } if (hi == 2) // R = p, G = V, B = t { res.x = t; res.y = V; res.z = p; } if (hi == 3) // R = p, G = q, B = V { res.x = V; res.y = q; res.z = p; } if (hi == 4) // R = t, G = p, B = V { res.x = V; res.y = p; res.z = t; } if (hi == 5) // R = V, G = p, B = q { res.x = q; res.y = p; res.z = V; } const unsigned int b = (unsigned int)(::max(0.f, ::min(res.x, 1.f)) * 255.f); const unsigned int g = (unsigned int)(::max(0.f, ::min(res.y, 1.f)) * 255.f); const unsigned int r = (unsigned int)(::max(0.f, ::min(res.z, 1.f)) * 255.f); const unsigned int a = 255U; return (a << 24) + (r << 16) + (g << 8) + b; } __global__ void drawColorDisp(uchar* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp) { const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 2; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { uchar4 d4 = *(uchar4*)(disp + y * disp_step + x); uint4 res; res.x = cvtPixel(d4.x, ndisp); res.y = cvtPixel(d4.y, ndisp); res.z = cvtPixel(d4.z, ndisp); res.w = cvtPixel(d4.w, ndisp); uint4* line = (uint4*)(out_image + y * out_step); line[x >> 2] = res; } } __global__ void drawColorDisp(short* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp) { const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { short2 d2 = *(short2*)(disp + y * disp_step + x); uint2 res; res.x = cvtPixel(d2.x, ndisp); res.y = cvtPixel(d2.y, ndisp); uint2* line = (uint2*)(out_image + y * out_step); line[x >> 1] = res; } } void drawColorDisp_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int ndisp, const cudaStream_t& stream) { dim3 threads(16, 16, 1); dim3 grid(1, 1, 1); grid.x = divUp(src.cols, threads.x << 2); grid.y = divUp(src.rows, threads.y); drawColorDisp<<<grid, threads, 0, stream>>>(src.data, src.step, dst.data, dst.step, src.cols, src.rows, ndisp); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void drawColorDisp_gpu(const PtrStepSz<short>& src, const PtrStepSzb& dst, int ndisp, const cudaStream_t& stream) { dim3 threads(32, 8, 1); dim3 grid(1, 1, 1); grid.x = divUp(src.cols, threads.x << 1); grid.y = divUp(src.rows, threads.y); drawColorDisp<<<grid, threads, 0, stream>>>(src.data, 
src.step / sizeof(short), dst.data, dst.step, src.cols, src.rows, ndisp); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////// reprojectImageTo3D /////////////////////////////////////////////// __constant__ float cq[16]; template <typename T, typename D> __global__ void reprojectImageTo3D(const PtrStepSz<T> disp, PtrStep<D> xyz) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= disp.rows || x >= disp.cols) return; const float qx = x * cq[ 0] + y * cq[ 1] + cq[ 3]; const float qy = x * cq[ 4] + y * cq[ 5] + cq[ 7]; const float qz = x * cq[ 8] + y * cq[ 9] + cq[11]; const float qw = x * cq[12] + y * cq[13] + cq[15]; const T d = disp(y, x); const float iW = 1.f / (qw + cq[14] * d); D v = VecTraits<D>::all(1.0f); v.x = (qx + cq[2] * d) * iW; v.y = (qy + cq[6] * d) * iW; v.z = (qz + cq[10] * d) * iW; xyz(y, x) = v; } template <typename T, typename D> void reprojectImageTo3D_gpu(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(disp.cols, block.x), divUp(disp.rows, block.y)); cudaSafeCall( cudaMemcpyToSymbol(cq, q, 16 * sizeof(float)) ); reprojectImageTo3D<T, D><<<grid, block, 0, stream>>>((PtrStepSz<T>)disp, (PtrStepSz<D>)xyz); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void reprojectImageTo3D_gpu<uchar, float3>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream); template void reprojectImageTo3D_gpu<uchar, float4>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream); template void reprojectImageTo3D_gpu<short, float3>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream); template void reprojectImageTo3D_gpu<short, float4>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream); /////////////////////////////////////////// Corner Harris ///////////////////////////////////////////////// texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDxTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDyTex(0, cudaFilterModePoint, cudaAddressModeClamp); __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(harrisDxTex, j, i); float dy = tex2D(harrisDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } template <typename BR, typename BC> __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { const int 
y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { const int x = border_row.idx_col(j); float dx = tex2D(harrisDxTex, x, y); float dy = tex2D(harrisDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } dst(y, x) = a * c - b * b - k * (a + c) * (a + c); } } void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&harrisDxTex, Dx); bindTexture(&harrisDyTex, Dy); switch (border_type) { case BORDER_REFLECT101_GPU: cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT_GPU: cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE_GPU: cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } /////////////////////////////////////////// Corner Min Eigen Val ///////////////////////////////////////////////// texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDxTex(0, cudaFilterModePoint, cudaAddressModeClamp); texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDyTex(0, cudaFilterModePoint, cudaAddressModeClamp); __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { for (int j = jbegin; j < jend; ++j) { float dx = tex2D(minEigenValDxTex, j, i); float dy = tex2D(minEigenValDyTex, j, i); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } template <typename BR, typename BC> __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float a = 0.f; float b = 0.f; float c = 0.f; const int ibegin = y - (block_size / 2); const int jbegin = x - (block_size / 2); const int iend = ibegin + block_size; const int jend = jbegin + block_size; for (int i = ibegin; i < iend; ++i) { int y = border_col.idx_row(i); for (int j = jbegin; j < jend; ++j) { int x = border_row.idx_col(j); float dx = tex2D(minEigenValDxTex, x, y); float dy = tex2D(minEigenValDyTex, x, y); a += dx * dx; b += dx * dy; c += dy * dy; } } a *= 0.5f; c *= 0.5f; dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b); } } void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y)); bindTexture(&minEigenValDxTex, Dx); bindTexture(&minEigenValDyTex, Dy); switch (border_type) { case BORDER_REFLECT101_GPU: cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows)); break; case BORDER_REFLECT_GPU: 
cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows)); break; case BORDER_REPLICATE_GPU: cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } ////////////////////////////// Column Sum ////////////////////////////////////// __global__ void column_sumKernel_32F(int cols, int rows, const PtrStepb src, const PtrStepb dst) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < cols) { const unsigned char* src_data = src.data + x * sizeof(float); unsigned char* dst_data = dst.data + x * sizeof(float); float sum = 0.f; for (int y = 0; y < rows; ++y) { sum += *(const float*)src_data; *(float*)dst_data = sum; src_data += src.step; dst_data += dst.step; } } } void columnSum_32F(const PtrStepSzb src, const PtrStepSzb dst) { dim3 threads(256); dim3 grid(divUp(src.cols, threads.x)); column_sumKernel_32F<<<grid, threads>>>(src.cols, src.rows, src, dst); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulSpectrums __global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); } } void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, c); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulSpectrums_CONJ __global__ void mulSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); } } void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, c); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums __global__ void mulAndScaleSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { cufftComplex v = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulAndScaleSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, scale, c); cudaSafeCall( 
cudaGetLastError() ); if (stream) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums_CONJ __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { cufftComplex v = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulAndScaleSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, scale, c); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // buildWarpMaps // TODO use intrinsics like __sinf and so on namespace build_warp_maps { __constant__ float ck_rinv[9]; __constant__ float cr_kinv[9]; __constant__ float ct[3]; __constant__ float cscale; } class PlaneMapper { public: static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y) { using namespace build_warp_maps; float x_ = u / cscale - ct[0]; float y_ = v / cscale - ct[1]; float z; x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]); y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]); z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]); x /= z; y /= z; } }; class CylindricalMapper { public: static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y) { using namespace build_warp_maps; u /= cscale; float x_ = ::sinf(u); float y_ = v / cscale; float z_ = ::cosf(u); float z; x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_; y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_; z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_; if (z > 0) { x /= z; y /= z; } else x = y = -1; } }; class SphericalMapper { public: static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y) { using namespace build_warp_maps; v /= cscale; u /= cscale; float sinv = ::sinf(v); float x_ = sinv * ::sinf(u); float y_ = -::cosf(v); float z_ = sinv * ::cosf(u); float z; x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_; y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_; z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_; if (z > 0) { x /= z; y /= z; } else x = y = -1; } }; template <typename Mapper> __global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows, PtrStepf map_x, PtrStepf map_y) { int du = blockIdx.x * blockDim.x + threadIdx.x; int dv = blockIdx.y * blockDim.y + threadIdx.y; if (du < cols && dv < rows) { float u = tl_u + du; float v = tl_v + dv; float x, y; Mapper::mapBackward(u, v, x, y); map_x.ptr(dv)[du] = x; map_y.ptr(dv)[du] = y; } } void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y, const float k_rinv[9], const float r_kinv[9], const float t[3], float scale, cudaStream_t stream) { cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ct, t, 
3*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float))); int cols = map_x.cols; int rows = map_x.rows; dim3 threads(32, 8); dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y)); buildWarpMapsKernel<PlaneMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y, const float k_rinv[9], const float r_kinv[9], float scale, cudaStream_t stream) { cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float))); int cols = map_x.cols; int rows = map_x.rows; dim3 threads(32, 8); dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y)); buildWarpMapsKernel<CylindricalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y, const float k_rinv[9], const float r_kinv[9], float scale, cudaStream_t stream) { cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float))); cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float))); int cols = map_x.cols; int rows = map_x.rows; dim3 threads(32, 8); dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y)); buildWarpMapsKernel<SphericalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y); cudaSafeCall(cudaGetLastError()); if (stream == 0) cudaSafeCall(cudaDeviceSynchronize()); } ////////////////////////////////////////////////////////////////////////// // filter2D #define FILTER2D_MAX_KERNEL_SIZE 16 __constant__ float c_filter2DKernel[FILTER2D_MAX_KERNEL_SIZE * FILTER2D_MAX_KERNEL_SIZE]; template <class SrcT, typename D> __global__ void filter2D(const SrcT src, PtrStepSz<D> dst, const int kWidth, const int kHeight, const int anchorX, const int anchorY) { typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; sum_t res = VecTraits<sum_t>::all(0); int kInd = 0; for (int i = 0; i < kHeight; ++i) { for (int j = 0; j < kWidth; ++j) res = res + src(y - anchorY + i, x - anchorX + j) * c_filter2DKernel[kInd++]; } dst(y, x) = saturate_cast<D>(res); } template <typename T, typename D, template <typename> class Brd> struct Filter2DCaller; #define IMPLEMENT_FILTER2D_TEX_READER(type) \ texture< type , cudaTextureType2D, cudaReadModeElementType> tex_filter2D_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \ struct tex_filter2D_ ## type ## _reader \ { \ typedef type elem_type; \ typedef int index_type; \ const int xoff; \ const int yoff; \ tex_filter2D_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \ __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \ { \ return tex2D(tex_filter2D_ ## type , x + xoff, y + yoff); \ } \ }; \ template <typename D, template <typename> class Brd> struct Filter2DCaller< type , D, Brd> \ { \ static void call(const PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz<D> dst, \ int 
kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream) \ { \ typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \ dim3 block(16, 16); \ dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \ bindTexture(&tex_filter2D_ ## type , srcWhole); \ tex_filter2D_ ## type ##_reader texSrc(xoff, yoff); \ Brd<work_type> brd(dst.rows, dst.cols, VecTraits<work_type>::make(borderValue)); \ BorderReader< tex_filter2D_ ## type ##_reader, Brd<work_type> > brdSrc(texSrc, brd); \ filter2D<<<grid, block, 0, stream>>>(brdSrc, dst, kWidth, kHeight, anchorX, anchorY); \ cudaSafeCall( cudaGetLastError() ); \ if (stream == 0) \ cudaSafeCall( cudaDeviceSynchronize() ); \ } \ }; IMPLEMENT_FILTER2D_TEX_READER(uchar); IMPLEMENT_FILTER2D_TEX_READER(uchar4); IMPLEMENT_FILTER2D_TEX_READER(ushort); IMPLEMENT_FILTER2D_TEX_READER(ushort4); IMPLEMENT_FILTER2D_TEX_READER(float); IMPLEMENT_FILTER2D_TEX_READER(float4); #undef IMPLEMENT_FILTER2D_TEX_READER template <typename T, typename D> void filter2D_gpu(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream) { typedef void (*func_t)(const PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream); static const func_t funcs[] = { Filter2DCaller<T, D, BrdReflect101>::call, Filter2DCaller<T, D, BrdReplicate>::call, Filter2DCaller<T, D, BrdConstant>::call, Filter2DCaller<T, D, BrdReflect>::call, Filter2DCaller<T, D, BrdWrap>::call }; cudaSafeCall(cudaMemcpyToSymbol(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, cudaMemcpyDeviceToDevice) ); funcs[borderMode](static_cast< PtrStepSz<T> >(srcWhole), ofsX, ofsY, static_cast< PtrStepSz<D> >(dst), kWidth, kHeight, anchorX, anchorY, borderValue, stream); } template void filter2D_gpu<uchar, uchar>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream); template void filter2D_gpu<uchar4, uchar4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream); template void filter2D_gpu<ushort, ushort>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream); template void filter2D_gpu<ushort4, ushort4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream); template void filter2D_gpu<float, float>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream); template void filter2D_gpu<float4, float4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device { #endif /* CUDA_DISABLER */
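// A minimal sketch (illustrative names, written against the HIP runtime to match the
// converted counterpart of this file earlier in the pair) of the __constant__-memory
// pattern that reprojectImageTo3D and filter2D rely on: coefficients are uploaded once
// with *MemcpyToSymbol and every thread then reads them through the constant cache.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

__constant__ float c_coeffs[2];        // stand-in for cq[16] / c_filter2DKernel

__global__ void affineKernel(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = c_coeffs[0] * in[i] + c_coeffs[1];   // per-element a*x + b
}

int main()
{
    const int n = 256;
    std::vector<float> h_in(n, 3.f), h_out(n);
    const float coeffs[2] = { 2.f, 1.f };             // a = 2, b = 1
    float *d_in = 0, *d_out = 0;
    hipMalloc((void**)&d_in, n * sizeof(float));
    hipMalloc((void**)&d_out, n * sizeof(float));
    hipMemcpy(d_in, h_in.data(), n * sizeof(float), hipMemcpyHostToDevice);

    // Mirrors cudaMemcpyToSymbol(cq, q, 16 * sizeof(float)) in the file above.
    hipMemcpyToSymbol(c_coeffs, coeffs, 2 * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    hipLaunchKernelGGL(affineKernel, grid, block, 0, 0, d_in, d_out, n);
    hipMemcpy(h_out.data(), d_out, n * sizeof(float), hipMemcpyDeviceToHost);
    printf("h_out[0] = %f\n", h_out[0]);              // expect 7.0
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}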
d5bb4506136f2470c361f3eb51b4fa3afc4f0095.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/cuda/numeric.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/kernels/math.h" #include "chainerx/numeric.h" #include "chainerx/routines/math.h" #include "chainerx/routines/type_util.h" #include "chainerx/scalar.h" namespace chainerx { namespace cuda { namespace { template <typename In, typename Out> struct IfLessElseASSAImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 < x2 ? pos : neg; } InCudaType x2; OutCudaType pos; }; class CudaIfLessElseASSAKernel : public IfLessElseASSAKernel { public: void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, neg, out); Dtype x_dtype = ResultType(x1, x2); const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype); const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(x_dtype, [&](auto x_pt) { using In = typename decltype(x_pt)::type; using InCudaType = cuda_internal::DataType<In>; VisitDtype(out.dtype(), [&](auto pt) { using Out = typename decltype(pt)::type; using OutCudaType = cuda_internal::DataType<Out>; Elementwise<const In, const Out, Out>( IfLessElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out); }); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(IfLessElseASSAKernel, CudaIfLessElseASSAKernel); template <typename In, typename Out> struct IfGreaterElseASSAImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 > x2 ? pos : neg; } InCudaType x2; OutCudaType pos; }; class CudaIfGreaterElseASSAKernel : public IfGreaterElseASSAKernel { public: void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, neg, out); Dtype x_dtype = ResultType(x1, x2); const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype); const Array& neg_cast = neg.dtype() == out.dtype() ? 
neg : neg.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(x_dtype, [&](auto x_pt) { using In = typename decltype(x_pt)::type; using InCudaType = cuda_internal::DataType<In>; VisitDtype(out.dtype(), [&](auto pt) { using Out = typename decltype(pt)::type; using OutCudaType = cuda_internal::DataType<Out>; Elementwise<const In, const Out, Out>( IfGreaterElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out); }); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseASSAKernel, CudaIfGreaterElseASSAKernel); template <typename In, typename Out> struct IfGreaterElseAAAAImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ void operator()(int64_t /*i*/, InCudaType x1, InCudaType x2, OutCudaType pos, OutCudaType neg, OutCudaType& out) { out = x1 > x2 ? pos : neg; } }; class CudaIfGreaterElseAAAAKernel : public IfGreaterElseAAAAKernel { public: void Call(const Array& x1, const Array& x2, const Array& pos, const Array& neg, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, pos, neg, out); Dtype x_dtype = ResultType(x1, x2); const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype); const Array& x2_cast = x2.dtype() == x_dtype ? x2 : x2.AsType(x_dtype); const Array& pos_cast = pos.dtype() == out.dtype() ? pos : pos.AsType(out.dtype()); const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(x_dtype, [&](auto x_pt) { using In = typename decltype(x_pt)::type; VisitDtype(out.dtype(), [&](auto pt) { using Out = typename decltype(pt)::type; Elementwise<const In, const In, const Out, const Out, Out>( IfGreaterElseAAAAImpl<In, Out>{}, x1_cast, x2_cast, pos_cast, neg_cast, out); }); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseAAAAKernel, CudaIfGreaterElseAAAAKernel); template <typename T> struct TanhImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); } }; } // namespace } // namespace cuda } // namespace chainerx
d5bb4506136f2470c361f3eb51b4fa3afc4f0095.cu
#include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/kernel_regist.h" #include "chainerx/cuda/numeric.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/kernels/math.h" #include "chainerx/numeric.h" #include "chainerx/routines/math.h" #include "chainerx/routines/type_util.h" #include "chainerx/scalar.h" namespace chainerx { namespace cuda { namespace { template <typename In, typename Out> struct IfLessElseASSAImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 < x2 ? pos : neg; } InCudaType x2; OutCudaType pos; }; class CudaIfLessElseASSAKernel : public IfLessElseASSAKernel { public: void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, neg, out); Dtype x_dtype = ResultType(x1, x2); const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype); const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(x_dtype, [&](auto x_pt) { using In = typename decltype(x_pt)::type; using InCudaType = cuda_internal::DataType<In>; VisitDtype(out.dtype(), [&](auto pt) { using Out = typename decltype(pt)::type; using OutCudaType = cuda_internal::DataType<Out>; Elementwise<const In, const Out, Out>( IfLessElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out); }); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(IfLessElseASSAKernel, CudaIfLessElseASSAKernel); template <typename In, typename Out> struct IfGreaterElseASSAImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 > x2 ? pos : neg; } InCudaType x2; OutCudaType pos; }; class CudaIfGreaterElseASSAKernel : public IfGreaterElseASSAKernel { public: void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, neg, out); Dtype x_dtype = ResultType(x1, x2); const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype); const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(x_dtype, [&](auto x_pt) { using In = typename decltype(x_pt)::type; using InCudaType = cuda_internal::DataType<In>; VisitDtype(out.dtype(), [&](auto pt) { using Out = typename decltype(pt)::type; using OutCudaType = cuda_internal::DataType<Out>; Elementwise<const In, const Out, Out>( IfGreaterElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out); }); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseASSAKernel, CudaIfGreaterElseASSAKernel); template <typename In, typename Out> struct IfGreaterElseAAAAImpl { using InCudaType = cuda_internal::DataType<In>; using OutCudaType = cuda_internal::DataType<Out>; __device__ void operator()(int64_t /*i*/, InCudaType x1, InCudaType x2, OutCudaType pos, OutCudaType neg, OutCudaType& out) { out = x1 > x2 ? 
pos : neg; } }; class CudaIfGreaterElseAAAAKernel : public IfGreaterElseAAAAKernel { public: void Call(const Array& x1, const Array& x2, const Array& pos, const Array& neg, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, pos, neg, out); Dtype x_dtype = ResultType(x1, x2); const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype); const Array& x2_cast = x2.dtype() == x_dtype ? x2 : x2.AsType(x_dtype); const Array& pos_cast = pos.dtype() == out.dtype() ? pos : pos.AsType(out.dtype()); const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(x_dtype, [&](auto x_pt) { using In = typename decltype(x_pt)::type; VisitDtype(out.dtype(), [&](auto pt) { using Out = typename decltype(pt)::type; Elementwise<const In, const In, const Out, const Out, Out>( IfGreaterElseAAAAImpl<In, Out>{}, x1_cast, x2_cast, pos_cast, neg_cast, out); }); }); } }; CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseAAAAKernel, CudaIfGreaterElseAAAAKernel); template <typename T> struct TanhImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); } }; } // namespace } // namespace cuda } // namespace chainerx
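The functors above only define the per-element rule; ChainerX's Elementwise template supplies the indexing, striding, and dtype dispatch. A minimal raw-CUDA sketch of the IfLessElseASSA rule on contiguous float buffers could look as follows (illustrative only; if_less_else_assa is a name made up here, and the real kernels handle arbitrary dtypes and strides).

#include <cstdint>
#include <cuda_runtime.h>

// out[i] = (x1[i] < x2) ? pos : neg[i], over contiguous float buffers
__global__ void if_less_else_assa(const float* x1, float x2, float pos,
                                  const float* neg, float* out, int64_t n)
{
    for (int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x; i < n;
         i += (int64_t)blockDim.x * gridDim.x)
    {
        out[i] = x1[i] < x2 ? pos : neg[i];
    }
}

// Example launch: if_less_else_assa<<<256, 256>>>(d_x1, 0.5f, 1.0f, d_neg, d_out, n);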
b4eac3132acab3c7868a44bf2e59c0cf5def76a3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * distance.cu
 * Copyright (C) 2016- CloudBrain <byzhang@>
 */

#include "distance.h"

__global__ void hamming_distance(KEY_T* keys, uint32_t *values, const uint32_t *query,
                                 hipTextureObject_t tex, unsigned int tex_height,
                                 int num_dim, int num_data_per_block, int overall_data) {
  int tu = blockIdx.x;
  int tv = threadIdx.x;
  unsigned int id = tu*num_data_per_block + tv;
  if (id < overall_data) {
    extern __shared__ uint32_t query_local[];
    if (tv < num_dim) {
      query_local[tv] = query[tv];
    }
    __syncthreads();
    KEY_T count = 0;
    for (int i = 0; i<num_dim; ++i) {
      unsigned int m = tex2D<unsigned int>(tex, tv * num_dim + i, tu);
      count += __popc(m ^ query_local[i]);
    }
    keys[id] = count;
    values[id] = id;
  }
}
b4eac3132acab3c7868a44bf2e59c0cf5def76a3.cu
/*
 * distance.cu
 * Copyright (C) 2016- CloudBrain <byzhang@>
 */

#include "distance.h"

__global__ void hamming_distance(KEY_T* keys, uint32_t *values, const uint32_t *query,
                                 cudaTextureObject_t tex, unsigned int tex_height,
                                 int num_dim, int num_data_per_block, int overall_data) {
  int tu = blockIdx.x;
  int tv = threadIdx.x;
  unsigned int id = tu*num_data_per_block + tv;
  if (id < overall_data) {
    extern __shared__ uint32_t query_local[];
    if (tv < num_dim) {
      query_local[tv] = query[tv];
    }
    __syncthreads();
    KEY_T count = 0;
    for (int i = 0; i<num_dim; ++i) {
      unsigned int m = tex2D<unsigned int>(tex, tv * num_dim + i, tu);
      count += __popc(m ^ query_local[i]);
    }
    keys[id] = count;
    values[id] = id;
  }
}
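For checking results of hamming_distance on the host, a reference of the same arithmetic could look like the sketch below. It assumes KEY_T is a 32-bit unsigned integer (distance.h is not shown) and that the texture is backed by a row-major array of width num_data_per_block * num_dim with no padding, so point id occupies words data[id * num_dim .. id * num_dim + num_dim - 1].

#include <cstdint>
#include <vector>

// Portable popcount; the kernel uses __popc on the device.
static inline uint32_t popcount32(uint32_t v)
{
    uint32_t c = 0;
    while (v) { v &= v - 1; ++c; }
    return c;
}

// keys[id] = sum over words of popcount(data[id][i] XOR query[i])
std::vector<uint32_t> hamming_distance_cpu(const std::vector<uint32_t>& data,   // n * num_dim words
                                           const std::vector<uint32_t>& query,  // num_dim words
                                           int num_dim)
{
    const size_t n = data.size() / num_dim;
    std::vector<uint32_t> keys(n, 0);
    for (size_t id = 0; id < n; ++id)
        for (int i = 0; i < num_dim; ++i)
            keys[id] += popcount32(data[id * num_dim + i] ^ query[i]);
    return keys;
}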
d408dda8a283a670a81950870ad2ad3c141861ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/layers/element_wise_function.hpp" #include "HugeCTR/include/layers/multiply_layer.hpp" #include "HugeCTR/include/utils.cuh" #include "HugeCTR/include/utils.hpp" #include <algorithm> #include <functional> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { #define BLOCK_DIM_SIZE 32 template <typename T> __global__ void multiply_kernel(const T* input, const T* weight, T* output, int batch_size, int slot_num, int embedding_vec_size) { if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) { for (int i = 0; i < slot_num; i++) { output[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size + threadIdx.x] = input[blockIdx.x * slot_num + i] * weight[i * embedding_vec_size + threadIdx.x]; } } } template <typename T> __global__ void multiply_transpose_fuse_kernel(int batch_size, int slot_num, int embedding_vec_size, const T* top_grad, const T* input, T* wgrad_tmp_trans) { int row = batch_size; int col = slot_num * embedding_vec_size; __shared__ T sh_data[BLOCK_DIM_SIZE + 1][BLOCK_DIM_SIZE]; int src_index_x = blockIdx.x * blockDim.x + threadIdx.x; int src_index_y = blockIdx.y * blockDim.y + threadIdx.y; if ((src_index_x < col) && (src_index_y < row)) { int index_in = src_index_y * col + src_index_x; sh_data[threadIdx.x][threadIdx.y] = top_grad[index_in] * input[index_in / embedding_vec_size]; } __syncthreads(); int dst_index_x = blockIdx.y * blockDim.y + threadIdx.x; int dst_index_y = blockIdx.x * blockDim.x + threadIdx.y; if ((dst_index_x < row) && (dst_index_y < col)) { int index_out = dst_index_y * row + dst_index_x; wgrad_tmp_trans[index_out] = sh_data[threadIdx.y][threadIdx.x]; } } // sum reduce computation in one block template <typename T> __global__ void sum_reduce_batch_kernel(int row, // row=gridDim.x int col, const T* input, T* output) { float local_sum = 0.0f; for (int tid = threadIdx.x; tid < col; tid += blockDim.x) { local_sum += input[blockIdx.x * col + tid]; } __syncthreads(); local_sum = blockReduceSum(local_sum); if (threadIdx.x == 0) { output[blockIdx.x] += local_sum; } } template <typename T> __global__ void multiply_dgrad_kernel(const T* top_grad, const T* weight, T* dgrad, int batch_size, int slot_num, int embedding_vec_size) { if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) { for (int i = 0; i < slot_num; i++) { T local_sum = top_grad[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size + threadIdx.x] * weight[i * embedding_vec_size + threadIdx.x]; local_sum = blockReduceSum(local_sum); if (threadIdx.x == 0) { dgrad[blockIdx.x * slot_num + i] = local_sum; } } } } template <typename T> void multiply_wgrad(const T* top_grad, const T* input, T* wgrad, T* wgrad_tmp_trans, int batch_size, int slot_num, int embedding_vec_size, hipStream_t stream) { dim3 blockSize1(BLOCK_DIM_SIZE, BLOCK_DIM_SIZE, 1); dim3 
gridSize1((slot_num * embedding_vec_size + blockSize1.x - 1) / blockSize1.x, (batch_size + blockSize1.y - 1) / blockSize1.y, 1); hipLaunchKernelGGL(( multiply_transpose_fuse_kernel), dim3(gridSize1), dim3(blockSize1), 0, stream, batch_size, slot_num, embedding_vec_size, top_grad, input, wgrad_tmp_trans); dim3 blockSize2(256, 1, 1); dim3 gridSize2(slot_num * embedding_vec_size, 1, 1); hipLaunchKernelGGL(( sum_reduce_batch_kernel), dim3(gridSize2), dim3(blockSize2), 0, stream, slot_num * embedding_vec_size, batch_size, wgrad_tmp_trans, wgrad); } template <typename T> void multiply_dgrad(const T* top_grad, const T* weight, T* dgrad, int batch_size, int slot_num, int embedding_vec_size, hipStream_t stream) { dim3 blockSize(embedding_vec_size, 1, 1); // note that embedding_vec_size should be < 1024 dim3 gridSize(batch_size, 1, 1); hipLaunchKernelGGL(( multiply_dgrad_kernel), dim3(gridSize), dim3(blockSize), 0, stream, top_grad, weight, dgrad, batch_size, slot_num, embedding_vec_size); } } // end of namespace MultiplyLayer::MultiplyLayer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff, const std::shared_ptr<GeneralBuffer<float>>& wgrad_buff, const std::shared_ptr<GeneralBuffer<float>>& blob_buff, const std::shared_ptr<Tensor<float>>& in_tensor, std::shared_ptr<Tensor<float>>& out_tensor, const std::vector<size_t>& weight_dims, int device_id, std::vector<Initializer_t> initializer_types) : Layer(device_id, initializer_types) { try { CudaDeviceContext context(get_device_id()); auto in_dims = in_tensor->get_dims(); if (in_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be multiplied"); } if (in_tensor->get_format() != TensorFormat_t::HW) { CK_THROW_(Error_t::WrongInput, "Only TensorFormat_t::HW is allowed for multiply layer"); } if (weight_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D weights is allowed for multiply layer"); } if (weight_dims[0] != in_dims[1]) { CK_THROW_(Error_t::WrongInput, "weight_dims[0] must be equal to in_dims[1]"); } batch_size_ = in_dims[0]; slot_num_ = weight_dims[0]; embedding_vec_size_ = weight_dims[1]; std::vector<size_t> out_dims{batch_size_, slot_num_ * embedding_vec_size_}; out_tensor.reset(new Tensor<float>(out_dims, blob_buff, in_tensor->get_format())); in_tensors_.emplace_back(in_tensor); out_tensors_.emplace_back(out_tensor); TensorFormat_t w_format = TensorFormat_t::HW; weights_.emplace_back(new Tensor<float>(weight_dims, weight_buff, w_format)); wgrad_.emplace_back(new Tensor<float>(weight_dims, wgrad_buff, w_format)); internal_buff_.reset(new GeneralBuffer<float>()); wgrad_tmp_trans_.reset(new Tensor<float>(out_dims, internal_buff_, TensorFormat_t::HW)); internal_buff_->init(get_device_id()); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_uniform_initializer(const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::unique_ptr<DataSimulator<float>>(new UnifiedDataSimulator<float>(-1 * limit, limit)); } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_xavier_uniform_initializer( const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; return std::unique_ptr<DataSimulator<float>>(new VarianceScalingSimulator<float>( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? 
bottom_dim : 0, top_dim)); } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_xavier_norm_initializer(const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; return std::unique_ptr<DataSimulator<float>>(new VarianceScalingSimulator<float>( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? bottom_dim : 0, top_dim)); } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_default_initializer(const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; return std::unique_ptr<DataSimulator<float>>(new VarianceScalingSimulator<float>( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim)); } void MultiplyLayer::fprop(hipStream_t stream) { CudaDeviceContext context(get_device_id()); float* input = in_tensors_[0]->get_ptr(); float* weight = weights_[0]->get_ptr(); float* output = out_tensors_[0]->get_ptr(); dim3 blockSize(embedding_vec_size_, 1, 1); dim3 gridSize(batch_size_, 1, 1); hipLaunchKernelGGL(( multiply_kernel), dim3(gridSize), dim3(blockSize), 0, stream, input, weight, output, batch_size_, slot_num_, embedding_vec_size_); } void MultiplyLayer::bprop(hipStream_t stream) { CudaDeviceContext context(get_device_id()); float* weight = weights_[0]->get_ptr(); float* wgrad = wgrad_[0]->get_ptr(); float* wgrad_tmp_trans = wgrad_tmp_trans_->get_ptr(); float* input = in_tensors_[0]->get_ptr(); float* output = out_tensors_[0]->get_ptr(); multiply_wgrad(output, input, wgrad, wgrad_tmp_trans, batch_size_, slot_num_, embedding_vec_size_, stream); // CAUSION: dgrad computation will modify the "input", so it must be put after wgrad computation multiply_dgrad(output, weight, input, batch_size_, slot_num_, embedding_vec_size_, stream); } } // namespace HugeCTR
d408dda8a283a670a81950870ad2ad3c141861ac.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/layers/element_wise_function.hpp" #include "HugeCTR/include/layers/multiply_layer.hpp" #include "HugeCTR/include/utils.cuh" #include "HugeCTR/include/utils.hpp" #include <algorithm> #include <functional> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { #define BLOCK_DIM_SIZE 32 template <typename T> __global__ void multiply_kernel(const T* input, const T* weight, T* output, int batch_size, int slot_num, int embedding_vec_size) { if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) { for (int i = 0; i < slot_num; i++) { output[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size + threadIdx.x] = input[blockIdx.x * slot_num + i] * weight[i * embedding_vec_size + threadIdx.x]; } } } template <typename T> __global__ void multiply_transpose_fuse_kernel(int batch_size, int slot_num, int embedding_vec_size, const T* top_grad, const T* input, T* wgrad_tmp_trans) { int row = batch_size; int col = slot_num * embedding_vec_size; __shared__ T sh_data[BLOCK_DIM_SIZE + 1][BLOCK_DIM_SIZE]; int src_index_x = blockIdx.x * blockDim.x + threadIdx.x; int src_index_y = blockIdx.y * blockDim.y + threadIdx.y; if ((src_index_x < col) && (src_index_y < row)) { int index_in = src_index_y * col + src_index_x; sh_data[threadIdx.x][threadIdx.y] = top_grad[index_in] * input[index_in / embedding_vec_size]; } __syncthreads(); int dst_index_x = blockIdx.y * blockDim.y + threadIdx.x; int dst_index_y = blockIdx.x * blockDim.x + threadIdx.y; if ((dst_index_x < row) && (dst_index_y < col)) { int index_out = dst_index_y * row + dst_index_x; wgrad_tmp_trans[index_out] = sh_data[threadIdx.y][threadIdx.x]; } } // sum reduce computation in one block template <typename T> __global__ void sum_reduce_batch_kernel(int row, // row=gridDim.x int col, const T* input, T* output) { float local_sum = 0.0f; for (int tid = threadIdx.x; tid < col; tid += blockDim.x) { local_sum += input[blockIdx.x * col + tid]; } __syncthreads(); local_sum = blockReduceSum(local_sum); if (threadIdx.x == 0) { output[blockIdx.x] += local_sum; } } template <typename T> __global__ void multiply_dgrad_kernel(const T* top_grad, const T* weight, T* dgrad, int batch_size, int slot_num, int embedding_vec_size) { if ((blockIdx.x < batch_size) && (threadIdx.x < embedding_vec_size)) { for (int i = 0; i < slot_num; i++) { T local_sum = top_grad[blockIdx.x * slot_num * embedding_vec_size + i * embedding_vec_size + threadIdx.x] * weight[i * embedding_vec_size + threadIdx.x]; local_sum = blockReduceSum(local_sum); if (threadIdx.x == 0) { dgrad[blockIdx.x * slot_num + i] = local_sum; } } } } template <typename T> void multiply_wgrad(const T* top_grad, const T* input, T* wgrad, T* wgrad_tmp_trans, int batch_size, int slot_num, int embedding_vec_size, cudaStream_t stream) { dim3 blockSize1(BLOCK_DIM_SIZE, BLOCK_DIM_SIZE, 1); dim3 gridSize1((slot_num * embedding_vec_size + blockSize1.x - 1) / blockSize1.x, (batch_size + 
blockSize1.y - 1) / blockSize1.y, 1); multiply_transpose_fuse_kernel<<<gridSize1, blockSize1, 0, stream>>>( batch_size, slot_num, embedding_vec_size, top_grad, input, wgrad_tmp_trans); dim3 blockSize2(256, 1, 1); dim3 gridSize2(slot_num * embedding_vec_size, 1, 1); sum_reduce_batch_kernel<<<gridSize2, blockSize2, 0, stream>>>(slot_num * embedding_vec_size, batch_size, wgrad_tmp_trans, wgrad); } template <typename T> void multiply_dgrad(const T* top_grad, const T* weight, T* dgrad, int batch_size, int slot_num, int embedding_vec_size, cudaStream_t stream) { dim3 blockSize(embedding_vec_size, 1, 1); // note that embedding_vec_size should be < 1024 dim3 gridSize(batch_size, 1, 1); multiply_dgrad_kernel<<<gridSize, blockSize, 0, stream>>>(top_grad, weight, dgrad, batch_size, slot_num, embedding_vec_size); } } // end of namespace MultiplyLayer::MultiplyLayer(const std::shared_ptr<GeneralBuffer<float>>& weight_buff, const std::shared_ptr<GeneralBuffer<float>>& wgrad_buff, const std::shared_ptr<GeneralBuffer<float>>& blob_buff, const std::shared_ptr<Tensor<float>>& in_tensor, std::shared_ptr<Tensor<float>>& out_tensor, const std::vector<size_t>& weight_dims, int device_id, std::vector<Initializer_t> initializer_types) : Layer(device_id, initializer_types) { try { CudaDeviceContext context(get_device_id()); auto in_dims = in_tensor->get_dims(); if (in_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be multiplied"); } if (in_tensor->get_format() != TensorFormat_t::HW) { CK_THROW_(Error_t::WrongInput, "Only TensorFormat_t::HW is allowed for multiply layer"); } if (weight_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D weights is allowed for multiply layer"); } if (weight_dims[0] != in_dims[1]) { CK_THROW_(Error_t::WrongInput, "weight_dims[0] must be equal to in_dims[1]"); } batch_size_ = in_dims[0]; slot_num_ = weight_dims[0]; embedding_vec_size_ = weight_dims[1]; std::vector<size_t> out_dims{batch_size_, slot_num_ * embedding_vec_size_}; out_tensor.reset(new Tensor<float>(out_dims, blob_buff, in_tensor->get_format())); in_tensors_.emplace_back(in_tensor); out_tensors_.emplace_back(out_tensor); TensorFormat_t w_format = TensorFormat_t::HW; weights_.emplace_back(new Tensor<float>(weight_dims, weight_buff, w_format)); wgrad_.emplace_back(new Tensor<float>(weight_dims, wgrad_buff, w_format)); internal_buff_.reset(new GeneralBuffer<float>()); wgrad_tmp_trans_.reset(new Tensor<float>(out_dims, internal_buff_, TensorFormat_t::HW)); internal_buff_->init(get_device_id()); } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_uniform_initializer(const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::unique_ptr<DataSimulator<float>>(new UnifiedDataSimulator<float>(-1 * limit, limit)); } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_xavier_uniform_initializer( const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; return std::unique_ptr<DataSimulator<float>>(new VarianceScalingSimulator<float>( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? 
bottom_dim : 0, top_dim)); } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_xavier_norm_initializer(const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; return std::unique_ptr<DataSimulator<float>>(new VarianceScalingSimulator<float>( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? bottom_dim : 0, top_dim)); } std::unique_ptr<DataSimulator<float>> MultiplyLayer::get_default_initializer(const int index) { float bottom_dim = slot_num_; float top_dim = slot_num_ * embedding_vec_size_; return std::unique_ptr<DataSimulator<float>>(new VarianceScalingSimulator<float>( 1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim)); } void MultiplyLayer::fprop(cudaStream_t stream) { CudaDeviceContext context(get_device_id()); float* input = in_tensors_[0]->get_ptr(); float* weight = weights_[0]->get_ptr(); float* output = out_tensors_[0]->get_ptr(); dim3 blockSize(embedding_vec_size_, 1, 1); dim3 gridSize(batch_size_, 1, 1); multiply_kernel<<<gridSize, blockSize, 0, stream>>>(input, weight, output, batch_size_, slot_num_, embedding_vec_size_); } void MultiplyLayer::bprop(cudaStream_t stream) { CudaDeviceContext context(get_device_id()); float* weight = weights_[0]->get_ptr(); float* wgrad = wgrad_[0]->get_ptr(); float* wgrad_tmp_trans = wgrad_tmp_trans_->get_ptr(); float* input = in_tensors_[0]->get_ptr(); float* output = out_tensors_[0]->get_ptr(); multiply_wgrad(output, input, wgrad, wgrad_tmp_trans, batch_size_, slot_num_, embedding_vec_size_, stream); // CAUSION: dgrad computation will modify the "input", so it must be put after wgrad computation multiply_dgrad(output, weight, input, batch_size_, slot_num_, embedding_vec_size_, stream); } } // namespace HugeCTR
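The forward pass of this layer is an outer-product-style scaling: every scalar input[b][s] multiplies the whole embedding row weight[s][:]. A small host reference of what multiply_kernel produces (a sketch for verification, not part of HugeCTR) is given below.

#include <cstddef>
#include <vector>

// output[b][s * vec_size + e] = input[b][s] * weight[s][e]
std::vector<float> multiply_fprop_cpu(const std::vector<float>& input,   // batch_size * slot_num
                                      const std::vector<float>& weight,  // slot_num * vec_size
                                      int batch_size, int slot_num, int vec_size)
{
    std::vector<float> output(static_cast<std::size_t>(batch_size) * slot_num * vec_size);
    for (int b = 0; b < batch_size; ++b)
        for (int s = 0; s < slot_num; ++s)
            for (int e = 0; e < vec_size; ++e)
                output[(static_cast<std::size_t>(b) * slot_num + s) * vec_size + e] =
                    input[static_cast<std::size_t>(b) * slot_num + s] *
                    weight[static_cast<std::size_t>(s) * vec_size + e];
    return output;
}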
b7fcf93134fa2504b051a8a938d4073bd7d13d64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2018 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "network.h" namespace apollo { namespace perception { __host__ __device__ float sigmoid_gpu(float x) { return 1.0 / (1.0 + exp(-x)); } __global__ void get_object_kernel(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) { int box_block = s_box_block_size; int idx = i; int c = idx % num_anchors; idx = idx / num_anchors; int w = idx % width; idx = idx / width; int h = idx; int offset_loc = ((h * width + w) * num_anchors + c) * 4; int offset_obj = (h * width + w) * num_anchors + c; int offset_cls = ((h * width + w) * num_anchors + c) * num_classes; float scale = obj_data[offset_obj]; // printf("%d %d %d %d %d (%d %d %d)| ",i,c,w,h,offset_loc,num_anchors,width,height); float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width; float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height; float hw = exp(loc_data[offset_loc + 2])* anchor_data[2 * c] / width * 0.5; float hh = exp(loc_data[offset_loc + 3]) * anchor_data[2 * c + 1] / height * 0.5; for (int k = 0; k < num_classes; ++k) { float prob = (cls_data[offset_cls + k] * scale > confidence_threshold ? 
cls_data[offset_cls + k] * scale : 0); // printf("%f %f | ",prob,cls_data[offset_cls + k] * scale); res_cls_data[k*width*height*num_anchors+i]=prob; } res_box_data[i * box_block + 0] = cx - hw; res_box_data[i * box_block + 1] = cy - hh; res_box_data[i * box_block + 2] = cx + hw; res_box_data[i * box_block + 3] = cy + hh; if (with_ori) { int offset_ori = ((h * width + w) * num_anchors + c) * 2; res_box_data[i*box_block+4]=atan2(ori_data[offset_ori+1],ori_data[offset_ori]); } if (with_dim) { int offset_dim = ((h * width + w) * num_anchors + c) * 3; res_box_data[i*box_block+5]=dim_data[offset_dim + 0]; res_box_data[i*box_block+6]=dim_data[offset_dim + 1]; res_box_data[i*box_block+7]=dim_data[offset_dim + 2]; } if (with_lof) { int offset_lof = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 8; auto &&src_ptr = lof_data + offset_lof; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } if (with_lor) { int offset_lor = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 12; auto &&src_ptr = lor_data + offset_lor; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } } } void GetObjectsGPU(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { #ifdef __x86_64__ const int thread_size = 512; #else const int thread_size = 256; #endif int block_size = (n + thread_size -1) / thread_size; { get_object_kernel << < block_size, thread_size >> > (n, loc_data, obj_data, cls_data, ori_data, dim_data, lof_data, lor_data, anchor_data, width, height, num_anchors, num_classes, confidence_threshold, with_ori, with_dim, with_lof, with_lor, res_box_data, res_cls_data, s_box_block_size); } hipDeviceSynchronize(); } } // namespace apollo } // namespace perception
b7fcf93134fa2504b051a8a938d4073bd7d13d64.cu
/****************************************************************************** * Copyright 2018 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "network.h" namespace apollo { namespace perception { __host__ __device__ float sigmoid_gpu(float x) { return 1.0 / (1.0 + exp(-x)); } __global__ void get_object_kernel(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) { int box_block = s_box_block_size; int idx = i; int c = idx % num_anchors; idx = idx / num_anchors; int w = idx % width; idx = idx / width; int h = idx; int offset_loc = ((h * width + w) * num_anchors + c) * 4; int offset_obj = (h * width + w) * num_anchors + c; int offset_cls = ((h * width + w) * num_anchors + c) * num_classes; float scale = obj_data[offset_obj]; // printf("%d %d %d %d %d (%d %d %d)| ",i,c,w,h,offset_loc,num_anchors,width,height); float cx = (w + sigmoid_gpu(loc_data[offset_loc + 0])) / width; float cy = (h + sigmoid_gpu(loc_data[offset_loc + 1])) / height; float hw = exp(loc_data[offset_loc + 2])* anchor_data[2 * c] / width * 0.5; float hh = exp(loc_data[offset_loc + 3]) * anchor_data[2 * c + 1] / height * 0.5; for (int k = 0; k < num_classes; ++k) { float prob = (cls_data[offset_cls + k] * scale > confidence_threshold ? 
cls_data[offset_cls + k] * scale : 0); // printf("%f %f | ",prob,cls_data[offset_cls + k] * scale); res_cls_data[k*width*height*num_anchors+i]=prob; } res_box_data[i * box_block + 0] = cx - hw; res_box_data[i * box_block + 1] = cy - hh; res_box_data[i * box_block + 2] = cx + hw; res_box_data[i * box_block + 3] = cy + hh; if (with_ori) { int offset_ori = ((h * width + w) * num_anchors + c) * 2; res_box_data[i*box_block+4]=atan2(ori_data[offset_ori+1],ori_data[offset_ori]); } if (with_dim) { int offset_dim = ((h * width + w) * num_anchors + c) * 3; res_box_data[i*box_block+5]=dim_data[offset_dim + 0]; res_box_data[i*box_block+6]=dim_data[offset_dim + 1]; res_box_data[i*box_block+7]=dim_data[offset_dim + 2]; } if (with_lof) { int offset_lof = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 8; auto &&src_ptr = lof_data + offset_lof; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } if (with_lor) { int offset_lor = ((h * width + w) * num_anchors + c) * 4; auto &&dst_ptr = res_box_data + i * box_block + 12; auto &&src_ptr = lor_data + offset_lor; auto sb_x = src_ptr[0] * hw * 2 + cx; auto sb_y = src_ptr[1] * hh * 2 + cy; auto sb_hw = exp(src_ptr[2]) * hw; auto sb_hh = exp(src_ptr[3]) * hh; dst_ptr[0] = sb_x - sb_hw; dst_ptr[1] = sb_y - sb_hh; dst_ptr[2] = sb_x + sb_hw; dst_ptr[3] = sb_y + sb_hh; } } } void GetObjectsGPU(int n, const float *loc_data, const float *obj_data, const float *cls_data, const float *ori_data, const float *dim_data, const float *lof_data, const float *lor_data, const float *anchor_data, int width, int height, int num_anchors, int num_classes, float confidence_threshold, bool with_ori, bool with_dim, bool with_lof, bool with_lor, float *res_box_data, float *res_cls_data, int s_box_block_size) { #ifdef __x86_64__ const int thread_size = 512; #else const int thread_size = 256; #endif int block_size = (n + thread_size -1) / thread_size; { get_object_kernel << < block_size, thread_size >> > (n, loc_data, obj_data, cls_data, ori_data, dim_data, lof_data, lor_data, anchor_data, width, height, num_anchors, num_classes, confidence_threshold, with_ori, with_dim, with_lof, with_lor, res_box_data, res_cls_data, s_box_block_size); } cudaDeviceSynchronize(); } } // namespace apollo } // namespace perception
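get_object_kernel decodes one box per (cell, anchor) pair: sigmoid on the center offsets, exp on the width/height terms scaled by the anchor, then conversion to normalized corner coordinates. A host-side sketch of that decode for a single anchor (illustrative only; decode_box_cpu and Box are names introduced here) follows.

#include <cmath>

struct Box { float xmin, ymin, xmax, ymax; };

inline float sigmoid_cpu(float x) { return 1.0f / (1.0f + std::exp(-x)); }

// loc = {tx, ty, tw, th}; anchor = {aw, ah}; (w, h) is the grid cell; width/height is the grid size
Box decode_box_cpu(const float loc[4], const float anchor[2],
                   int w, int h, int width, int height)
{
    const float cx = (w + sigmoid_cpu(loc[0])) / width;
    const float cy = (h + sigmoid_cpu(loc[1])) / height;
    const float hw = std::exp(loc[2]) * anchor[0] / width * 0.5f;
    const float hh = std::exp(loc[3]) * anchor[1] / height * 0.5f;
    return Box{cx - hw, cy - hh, cx + hw, cy + hh};
}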
3780e8adb27d40fd64368b810d65bde2f0ed2db0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <vector> #include<fstream> #include<iomanip> #include <windows.h> using namespace std; //location_x, location_y, location_z, width, height, depth, fixed temperature typedef struct FixT { dim3 loc; dim3 size; float t; }; //config file typedef struct Conf { float k; int times; dim3 size; float defaultT; vector<FixT*> fixts; }; struct Conf conf; int conIndex = 0; /* split string */ vector<string> split(string str, string pattern) { vector<string> result; int pos; str += pattern; for (int i = 0; i < str.size(); i++) { pos = str.find(pattern, i); if (pos<str.size()) { result.push_back(str.substr(i, pos - i)); i = pos + pattern.size() - 1; } } return result; } //get fixed temperature struct data FixT* getFixT(vector<string> &vec) { FixT *fixt = (FixT*)malloc(sizeof(FixT)); int index = 0; fixt->loc.x = atoi(vec[index++].c_str()); fixt->loc.y = atoi(vec[index++].c_str()); fixt->loc.z = 0; if (vec.size()>5) { fixt->loc.z = atoi(vec[index++].c_str()); } fixt->size.x = atoi(vec[index++].c_str()); fixt->size.y = atoi(vec[index++].c_str()); fixt->size.z = 1; if (vec.size()>5) { fixt->size.z = atoi(vec[index++].c_str()); } fixt->t = atof(vec[index++].c_str()); return fixt; } //read data from file void readData(char *file) { char line[1024]; ifstream readFile(file); int rown = 0; while (!readFile.eof()) { readFile.getline(line, 1024); string str(line); if (line[0] != '#') { string str(line); if (str=="2D"||str=="3D") { continue; } vector<string> vec = split(str, ","); if (vec.size()==1) { if (rown>3) { rown = 0; conIndex++; } } switch (rown) { case 0: conf.k = atof(vec[0].c_str()); if (conf.k>1) { conf.k /= 100; } break; case 1: conf.times= atoi(vec[0].c_str()); break; case 2: conf.size.x = atoi(vec[0].c_str()); conf.size.y = atoi(vec[1].c_str()); conf.size.z = 1; if (vec.size()==3) { conf.size.z = atoi(vec[2].c_str()); } break; case 3: conf.defaultT = atof(vec[0].c_str()); break; default: FixT*fix = getFixT(vec); conf.fixts.push_back(fix); break; } rown++; } } readFile.close(); } //out data to file void outData(float **data,Conf*conf,string file) { FILE* fp = fopen(file.c_str(), "w+"); for (int z = 0; z < conf->size.z; z++) { for (int y = 0; y < conf->size.y; y++) { for (int x = 0; x < conf->size.x; x++) { float d = data[z][x + y*conf->size.x]; fprintf(fp, "%.4f", d); if (x<conf->size.x-1) { fprintf(fp,","); } } fprintf(fp, "\n"); } fprintf(fp, "\n"); } fclose(fp); } //gpu compute __global__ void simulat(float**devr,int x,int y,int z,float k) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z+ blockDim.z*blockIdx.z; if (idx>=x||idy>=y||idz>=z) { return; } int dirs[][3] = { {1,0,0}, { -1,0,0 }, { 0,1,0 }, { 0,-1,0 }, { 0,0,1 }, { 0,0,-1 }, }; float old=devr[idz][idx + idy*x]; if (old<0) { return; } float sum = 0; for (int i = 0; i < 6; i++) { int dx = dirs[i][0] + idx; int dy = dirs[i][1] + idy; int dz = dirs[i][2] + idz; if (dx>=0&&dx<x&&dy>=0&&dy<y&&dz>=0&&dz<z) { sum += abs(devr[dz][dx + dy*x]) - old; } } if (old>0) { devr[idz][idx + idy*x] = old + k*sum; } } //simulate starting void heat(Conf*conf,string file) { float **room; float **devr; int x = conf->size.x; int y = conf->size.y; int z = conf->size.z; room = (float**)malloc(sizeof(float*)*z);//cpu mem for (int i = 0; i < z; i++) { do { room[i] = (float*)malloc(sizeof(float)*x*y); } while (room[i]==0); } for 
(int i = 0; i < z; i++) { for (int j = 0; j < x*y; j++) { room[i][j] = conf->defaultT; } } //init data for (int i = 0; i < conf->fixts.size(); i++) { FixT*fix = conf->fixts[i]; for (int z1 = fix->loc.z; z1 < fix->size.z + fix->loc.z; z1++) { for (int y1 = fix->loc.y; y1 < fix->size.y + fix->loc.y; y1++) { for (int x1 = fix->loc.x; x1 < fix->size.x + fix->loc.x; x1++) { int index = x1 + y1*x; room[z1][index] = -fix->t; } } } } //gpu array float **tem = (float**)malloc(sizeof(float*)*z); for (int i = 0; i < z; i++) { hipMalloc((void**)&tem[i], sizeof(float)*x*y); hipMemcpy(tem[i], room[i], sizeof(float) * x*y, hipMemcpyHostToDevice); } hipMalloc((void**)&devr, sizeof(float*)*z); hipMemcpy(devr, tem, sizeof(float*) * z, hipMemcpyHostToDevice); //gpu thread init int sx = 5; int sy = 5; int sz = 5; dim3 blocks(x/sx+(x%sx>0),y/sy+(y%sy>0) , z / sz + (z%sz>0)); dim3 threads(sx,sy,sz); for (int i = 0; i < conf->times; i++) { simulat << <blocks, threads >> >(devr, x, y, z, conf->k); int cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { cout << "error" << endl; } } for (int i = 0; i < z; i++) { hipMemcpy( room[i], tem[i], sizeof(float) * x*y, hipMemcpyDeviceToHost); for (int j = 0; j < x*y; j++) { if (room[i][j]<0) { room[i][j] = -room[i][j]; } } hipFree(tem[i]); } hipFree(devr); outData(room,conf,file); for (int i = 0; i < z; i++) { free(room[i]); } free(room); } int main(int argc,char **argv) { readData(argv[1]); heat(&conf, "heatOutput.csv"); cout << "The End!" << endl; getchar(); return 0; }
3780e8adb27d40fd64368b810d65bde2f0ed2db0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <vector> #include<fstream> #include<iomanip> #include <windows.h> using namespace std; //location_x, location_y, location_z, width, height, depth, fixed temperature typedef struct FixT { dim3 loc; dim3 size; float t; }; //config file typedef struct Conf { float k; int times; dim3 size; float defaultT; vector<FixT*> fixts; }; struct Conf conf; int conIndex = 0; /* split string */ vector<string> split(string str, string pattern) { vector<string> result; int pos; str += pattern; for (int i = 0; i < str.size(); i++) { pos = str.find(pattern, i); if (pos<str.size()) { result.push_back(str.substr(i, pos - i)); i = pos + pattern.size() - 1; } } return result; } //get fixed temperature struct data FixT* getFixT(vector<string> &vec) { FixT *fixt = (FixT*)malloc(sizeof(FixT)); int index = 0; fixt->loc.x = atoi(vec[index++].c_str()); fixt->loc.y = atoi(vec[index++].c_str()); fixt->loc.z = 0; if (vec.size()>5) { fixt->loc.z = atoi(vec[index++].c_str()); } fixt->size.x = atoi(vec[index++].c_str()); fixt->size.y = atoi(vec[index++].c_str()); fixt->size.z = 1; if (vec.size()>5) { fixt->size.z = atoi(vec[index++].c_str()); } fixt->t = atof(vec[index++].c_str()); return fixt; } //read data from file void readData(char *file) { char line[1024]; ifstream readFile(file); int rown = 0; while (!readFile.eof()) { readFile.getline(line, 1024); string str(line); if (line[0] != '#') { string str(line); if (str=="2D"||str=="3D") { continue; } vector<string> vec = split(str, ","); if (vec.size()==1) { if (rown>3) { rown = 0; conIndex++; } } switch (rown) { case 0: conf.k = atof(vec[0].c_str()); if (conf.k>1) { conf.k /= 100; } break; case 1: conf.times= atoi(vec[0].c_str()); break; case 2: conf.size.x = atoi(vec[0].c_str()); conf.size.y = atoi(vec[1].c_str()); conf.size.z = 1; if (vec.size()==3) { conf.size.z = atoi(vec[2].c_str()); } break; case 3: conf.defaultT = atof(vec[0].c_str()); break; default: FixT*fix = getFixT(vec); conf.fixts.push_back(fix); break; } rown++; } } readFile.close(); } //out data to file void outData(float **data,Conf*conf,string file) { FILE* fp = fopen(file.c_str(), "w+"); for (int z = 0; z < conf->size.z; z++) { for (int y = 0; y < conf->size.y; y++) { for (int x = 0; x < conf->size.x; x++) { float d = data[z][x + y*conf->size.x]; fprintf(fp, "%.4f", d); if (x<conf->size.x-1) { fprintf(fp,","); } } fprintf(fp, "\n"); } fprintf(fp, "\n"); } fclose(fp); } //gpu compute __global__ void simulat(float**devr,int x,int y,int z,float k) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int idy = threadIdx.y + blockDim.y * blockIdx.y; int idz = threadIdx.z+ blockDim.z*blockIdx.z; if (idx>=x||idy>=y||idz>=z) { return; } int dirs[][3] = { {1,0,0}, { -1,0,0 }, { 0,1,0 }, { 0,-1,0 }, { 0,0,1 }, { 0,0,-1 }, }; float old=devr[idz][idx + idy*x]; if (old<0) { return; } float sum = 0; for (int i = 0; i < 6; i++) { int dx = dirs[i][0] + idx; int dy = dirs[i][1] + idy; int dz = dirs[i][2] + idz; if (dx>=0&&dx<x&&dy>=0&&dy<y&&dz>=0&&dz<z) { sum += abs(devr[dz][dx + dy*x]) - old; } } if (old>0) { devr[idz][idx + idy*x] = old + k*sum; } } //simulate starting void heat(Conf*conf,string file) { float **room; float **devr; int x = conf->size.x; int y = conf->size.y; int z = conf->size.z; room = (float**)malloc(sizeof(float*)*z);//cpu mem for (int i = 0; i < z; i++) { do { room[i] = (float*)malloc(sizeof(float)*x*y); } while (room[i]==0); } for (int i = 0; i < z; i++) { for (int j = 0; j < x*y; j++) { 
room[i][j] = conf->defaultT; } } //init data for (int i = 0; i < conf->fixts.size(); i++) { FixT*fix = conf->fixts[i]; for (int z1 = fix->loc.z; z1 < fix->size.z + fix->loc.z; z1++) { for (int y1 = fix->loc.y; y1 < fix->size.y + fix->loc.y; y1++) { for (int x1 = fix->loc.x; x1 < fix->size.x + fix->loc.x; x1++) { int index = x1 + y1*x; room[z1][index] = -fix->t; } } } } //gpu array float **tem = (float**)malloc(sizeof(float*)*z); for (int i = 0; i < z; i++) { cudaMalloc((void**)&tem[i], sizeof(float)*x*y); cudaMemcpy(tem[i], room[i], sizeof(float) * x*y, cudaMemcpyHostToDevice); } cudaMalloc((void**)&devr, sizeof(float*)*z); cudaMemcpy(devr, tem, sizeof(float*) * z, cudaMemcpyHostToDevice); //gpu thread init int sx = 5; int sy = 5; int sz = 5; dim3 blocks(x/sx+(x%sx>0),y/sy+(y%sy>0) , z / sz + (z%sz>0)); dim3 threads(sx,sy,sz); for (int i = 0; i < conf->times; i++) { simulat << <blocks, threads >> >(devr, x, y, z, conf->k); int cudaStatus = cudaThreadSynchronize(); if (cudaStatus != cudaSuccess) { cout << "error" << endl; } } for (int i = 0; i < z; i++) { cudaMemcpy( room[i], tem[i], sizeof(float) * x*y, cudaMemcpyDeviceToHost); for (int j = 0; j < x*y; j++) { if (room[i][j]<0) { room[i][j] = -room[i][j]; } } cudaFree(tem[i]); } cudaFree(devr); outData(room,conf,file); for (int i = 0; i < z; i++) { free(room[i]); } free(room); } int main(int argc,char **argv) { readData(argv[1]); heat(&conf, "heatOutput.csv"); cout << "The End!" << endl; getchar(); return 0; }
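The simulat kernel relaxes each free cell by k times the sum of (|neighbor| - old) over its six axis neighbors, with fixed-temperature cells stored negated and skipped; the hipified version of this pair also replaces the deprecated cudaThreadSynchronize with hipDeviceSynchronize. A CPU reference for a single update step (a sketch for checking small grids only; it reads from a copy, whereas the kernel updates the array in place) might look like this.

#include <cmath>
#include <cstddef>
#include <vector>

// room is a flattened x*y*z grid, index (iz * y + iy) * x + ix; negative values mark fixed cells.
void heat_step_cpu(std::vector<float>& room, int x, int y, int z, float k)
{
    static const int dirs[6][3] = {{1,0,0},{-1,0,0},{0,1,0},{0,-1,0},{0,0,1},{0,0,-1}};
    const std::vector<float> prev = room;  // the kernel updates in place; a copy keeps this reference deterministic
    for (int iz = 0; iz < z; ++iz)
        for (int iy = 0; iy < y; ++iy)
            for (int ix = 0; ix < x; ++ix)
            {
                const std::size_t idx = (static_cast<std::size_t>(iz) * y + iy) * x + ix;
                const float old = prev[idx];
                if (old <= 0.0f) continue;  // fixed-temperature (negated) cells are never relaxed
                float sum = 0.0f;
                for (int d = 0; d < 6; ++d)
                {
                    const int dx = ix + dirs[d][0], dy = iy + dirs[d][1], dz = iz + dirs[d][2];
                    if (dx >= 0 && dx < x && dy >= 0 && dy < y && dz >= 0 && dz < z)
                        sum += std::fabs(prev[(static_cast<std::size_t>(dz) * y + dy) * x + dx]) - old;
                }
                room[idx] = old + k * sum;
            }
}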
fada2c6df19fcc814954d1bbc2f519a61ae9b2f4.hip
// !!! This is a file automatically generated by hipify!!! /* Before you use this version, double check the GPU memory capacity, * Typically, we need GPU to able to take the size of proj_data_size*2 + volume_size*8. * Otherwise you have to choose old versions, or try to modify this version to use CPU computing/storage as much as possible **/ #include <cmath> #include <fstream> #include <time.h> #include <iostream> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <assert.h> #define Default_GPU 0 #define Number_of_Devices 1 #define FBCT 0 // 0: CBCT; 1: multiple layer FBCT #define DEBUG 1 using namespace std; // Mode selection const int FISTA = 1; // -2: execution configuration test // -1: adjoint operator check // 0: SART + TV // 1: SART + TV + FISTA const float lambda_TV = 0.00f; //regularization parameter for the tv norm const float lambda_L1 = 0.00f; //regularization parameter for the l1 norm const int Iter_num = 20; const float Lip_con = 32.0f; // Distances const float DSO = 1.0f; const float DOD = -1.0f; // Reconstructed volume properties const int M = 512; // reconstruction volume x range const int N = 512; // reconstruction volume y range const int ZETA = 512; // reconstruction volume z range const int NO_X = M; const int NO_Y = N; const int NO_Z = ZETA; const int NO_VOXEL = M*N*ZETA; const float volumn_x = 1e-4 ; // (in meter) const float inv_volumn_x = 1.0/volumn_x; const int M_Offset = 0; // for half detector use const float boundary_voxel_x = -volumn_x*(float(M)/2.0f+M_Offset); const float volumn_y = volumn_x ; const float inv_volumn_y = 1.0/volumn_y; const float boundary_voxel_y = -volumn_y*(float(N)/2.0f); const float volumn_z = volumn_x ; const float inv_volumn_z = 1.0/volumn_z; const float boundary_voxel_z = -volumn_z*(float(ZETA)/2.0f); // parameters for half detector offset const float Offset = 0; // Source properties const float Source_x = DSO; const float Source_y = Offset; const float Source_z = 0; // Projection properties const int R = 1024; // detector width const int Z_prj = 1024; // detector height // Note: for FBCT, Z_prj = ZETA const float Detector_pixel_x = 1.2e-4; const float Detector_Ymin = -Detector_pixel_x*(float(R)/2.0f - 0.5f) + Offset; const float Detector_Zmin = -Detector_pixel_x*(float(Z_prj)/2.0f - 0.5f); const float PI = 3.141592653589793f; // acquisition parameters const int Nviews = 220; const float us_rate = 1.00f; const float initialAngle= 0.00f ; const float shiftAngle= 0.0f; const float MAX_infi = 1e16; const int DenoiseOption = 4; #include "InitGPU.h" #include "kernel_tool_functions.cu" #include "host_tool_functions.cu" /* If you want to use the code in which the backprojection is implemented in pixel-driven, * please uncomment the follwing two files and comment out the counterparts */ // #include "pixel_driven_backprj/kernel_IterativeRecon_CBCT.cu" // #include "pixel_driven_backprj/host_IterativeRecon_CBCT.c" // #if FBCT==1 // #include "kernel_IterativeRecon_FBCT.cu" // #else // #include "kernel_IterativeRecon_CBCT.cu" // #endif #include "kernel_IterativeRecon_universal.cu" //This version intergrate both CBCT and FBCT; #include "kernel_IterativeRecon_universal_multiGPU.cu" // Always be inlcuded // #include "host_IterativeRecon_CBCT.c" #include "host_IterativeRecon_CBCT_multiGPU.c" #include "host_FGP_Denoise_CPU.h" #include "kernel_FGP_Denoise_GPUx4.cu" #include "host_FPG_Denoise_GPUx4.c" #include "kernel_FGP_Denoise_GPUx7.cu" #include "host_FGP_Denoise_GPUx7.cu" 
main(int argc, char ** argv) { // print CUDA information if (!InitCUDA()) { return 0; } /* ************* User defined parameters ************/ char directory[]="/home/huifeng/CUDA_multiGPU/CBCT/"; char objectName[]="SLPhantom2"; char outputFolder[]="/Recon_Phantom_512/"; int Niter_denoise = 20; //iter number for denoising problem /*********** other declared variables ************/ float step_size = 2.0f/Lip_con; float lambda_denoise_TV = 2.0f*lambda_TV/Lip_con; double data_fidelity = 0.0f; double tv_value = 0.0f; double object_function_value_xk; double *object_function_array = new double [Iter_num*3]; bzero(object_function_array, sizeof(double)*Iter_num*3); float t_k; float t_k_1=1.0f; FILE *fp; char filename[200]; char fn[200]; float endAngle = initialAngle + (Nviews - 1)*us_rate; /**************** CPU memory allocation *****************/ // for 3D reconstructed volume float *F_Y_k = new float [M*N*ZETA]; // Y(k) bzero(F_Y_k, sizeof(float)*M*N*ZETA); float *F_X_k_1 = new float [M*N*ZETA]; // X(k-1) bzero(F_X_k_1, sizeof(float)*M*N*ZETA); float *F_recon; std::cout << "Testing ... " << std::endl; checkCuda(hipSetDevice(Default_GPU)); checkCuda( hipHostMalloc((void**)&F_recon, sizeof(float)*M*N*ZETA) ); // host pinned memory // for 2D projection dataset float *h_proj_forward = new float [R*Z_prj*Nviews]; bzero(h_proj_forward, sizeof(float)*R*Z_prj*Nviews); float *h_proj_measured = new float [R*Z_prj*Nviews]; bzero(h_proj_measured, sizeof(float)*R*Z_prj*Nviews); /**************** GPU memory allocation *****************/ size_t d_proj_data_size = sizeof(float)*R*Z_prj*Nviews; size_t d_volume_size = sizeof(float)*M*N*ZETA; // allocate GPU memory for the whole measurement data float *d_proj_data = NULL; hipMalloc((void**)&d_proj_data, d_proj_data_size); hipMemcpy(d_proj_data, h_proj_measured, d_proj_data_size, hipMemcpyHostToDevice); // allocate GPU memory for the recon volume float *d_recon = NULL; hipMalloc((void**)&d_recon, d_volume_size); hipMemset(d_recon, 0, d_volume_size); /********** Read Projections **************/ // printf("Read projection files ...\n"); // // for (int j=0;j<Nviews;j++) // { // fileAngle = float(j*us_rate + initialAngle); // if ((CT_TOMO == 1) && (j>=(Nviews/2))) // { // fileAngle = 180+ (j-Nviews/2)*us_rate + initialAngle; // } // if (fileAngle < 0) // fileAngle = fileAngle + 360; // // strcpy(filename,directory); // sprintf(fn,"/AnalyticalForwardProjection/CBCT_spheres_Projections/phi_%.02f.proj", fileAngle); // strcat(filename,fn); // cout<<fn<<endl; // if ( (fp = fopen(filename,"rb")) == NULL ) // { // printf("can not open projection files for main function \n"); // printf("%s\n",filename); // exit(0); // } // // fseek(fp,sizeof(float)*R*(int(2048/2-Z_prj/2)),0); // If you want to read part of the projections // fread(h_proj_measured + j*Z_prj*R, sizeof(float)*Z_prj*R,1,fp); // fclose(fp); // } /********** Inverse Crime study **************/ // load volumetric image strcpy(filename,directory); sprintf(fn,"SLphantom3d_512.dat"); strcat(filename,fn); cout<<"Loading "<<fn<<endl; if ( (fp = fopen(filename,"rb")) == NULL ) { printf("Can not load volumetric image \n"); printf("%s\n",filename); goto endProgram; } fread(F_recon, sizeof(float)*M*N*ZETA,1,fp); fclose(fp); cout<<"Load Phantom Sucessfully!"<<endl; hipMemcpy(d_recon, F_recon, d_volume_size, hipMemcpyHostToDevice); Forward_3D_ray_driven_siddon(d_recon, d_proj_data); // SaveDeviceDataToFile(d_proj_data,R*Z_prj*Nviews,"../GeneratedProjection.dat"); hipMemset(d_recon, 0, d_volume_size); /********** Load initial 
guess **************/ // strcpy(filename,directory); // sprintf(fn,"ReconTemp.recon"); // strcat(filename,fn); // cout<<"Loading "<<fn<<endl; // if ( (fp = fopen(filename,"rb")) == NULL ) // { // printf("Can not load volumetric image \n"); // printf("%s\n",filename); // exit(0); // } // fread(F_recon, sizeof(float)*M*N*ZETA,1,fp); // fclose(fp); // cout<<"Load Initial Guess Sucessfully!"<<endl; /****************Iteration Reconstruction**************************/ //Set Timer 1 struct timeval t1,t2; gettimeofday(&t1,NULL); for (int k=1;k<=Iter_num;k++) { // if (FISTA==-2) // "occupancy calculator", check the best execution configuration. Refer to the program guide // { // int numBlocks; // Occupancy in terms of active blocks // int blockSize = 128; // int activeWarps; // int maxWarps; // // hipDeviceProp_t prop; // hipGetDeviceProperties(&prop, Default_GPU); // // hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,backprj_ray_driven_3d_kernel,blockSize,0); // activeWarps = numBlocks * blockSize / prop.warpSize; // maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; // std::cout << "Occupancy: " << (double)activeWarps / maxWarps * 100 << "%" << std::endl; // std::cout << "MaxActiveBlocksPerMultiprocessor: " << numBlocks << std::endl; // goto endProgram; // } if (FISTA==-1) { /*Note: You need to first uncomment the phantom loading code to initialize a valid F_recon*/ CheckMatchedJointOperator(F_recon); goto endProgram; } if (FISTA==0) { printf("Undergoing SART updating... relaxation = %f\n", step_size); Reconstruction_3D_ray_driven_CBCT(d_recon, d_proj_data, step_size); // SaveDeviceDataToFile(d_recon,M*N*ZETA,"../Recon.dat"); if (lambda_TV>0.0f) { printf("Undergoing TV regularization ...\n"); switch(DenoiseOption) // Denoise options { case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x7 volume size, fast case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slowest case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slower case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, fast, slow in convergence } std::cout<<"TV regularization finished.\n"; } hipMemcpy(F_recon, d_recon, d_volume_size, hipMemcpyDeviceToHost); } if (FISTA==1) { printf("Undergoing SART updating... 
relaxation = %f\n", step_size); hipMemcpy(d_recon, F_Y_k, d_volume_size, hipMemcpyHostToDevice); Reconstruction_3D_ray_driven_CBCT(d_recon, d_proj_data, step_size); if (lambda_TV>0.0f) { printf("Undergoing TV regularization ...\n"); switch(DenoiseOption) // Denoise options { case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x7 volume size, fast case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slowest case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slower case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, fast, slow in convergence } std::cout<<"TV regularization finished.\n"; } hipMemcpy(F_recon, d_recon, d_volume_size, hipMemcpyDeviceToHost); t_k = (1.0f + sqrt(1.0f + 4.0f*t_k_1*t_k_1) )/2.0f; // Note: t(k) = [1+sqrt(1+4*t(k-1)^2)]/2 for (int i=0;i<NO_VOXEL;i++) F_Y_k[i] = F_recon[i] + (t_k_1 -1.0f)/t_k * (F_recon[i] - F_X_k_1[i]); // Note: y(k) = x(k) + [t(k-1) -1]/t(k) * [x(k)-x(k-1)] t_k_1 = t_k; // Note: Update t(k-1): t(k-1) <- t(k) memcpy(F_X_k_1,F_recon,sizeof(float)*M*N*ZETA); // Note: Update x(k-1): x(k-1) <- x(k) } /*****************Calculating Obj Func Value ********************/ std::cout<<"Calculating Object Func Value ...\n"; //Note: object function value || Ax - b ||_2 + 2*lambda_TV*||f||_tvnorm + lambda_L1*||\phi f ||_L1 ; /*** data fidelity ****/ std::cout<<" - calculating data fidelity ... \n"; float *d_proj_forward = NULL; hipMalloc((void**)&d_proj_forward, d_proj_data_size); hipMemset(d_proj_forward, 0, d_proj_data_size); Forward_3D_ray_driven_siddon(d_recon, d_proj_forward); data_fidelity = L2_norm_gpu(d_proj_forward, d_proj_data); std::cout<<" * L2 Norm="<<data_fidelity<<endl; hipFree(d_proj_forward); /*** TV norm ****/ std::cout<<" - calculating TV norm ... 
\n"; tv_value = TV_norm_gpu(d_recon); std::cout<<" * TV value="<<tv_value<<endl; /***** obj function ******/ object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value; //Note: object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value + 1.0f*lambda_L1*l1_value; object_function_array[k*3-3] = tv_value; object_function_array[k*3-2] = data_fidelity; object_function_array[k*3-1] = object_function_value_xk; std::cout<<"Object function value for x(k) = "<< tv_value <<" + "<< data_fidelity <<" = "<<object_function_value_xk <<std::endl; /***************** Saving ********************/ strcpy(filename,directory); sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_new_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con); strcat(filename,fn); if ( (fp = fopen(filename,"wb")) == NULL ) { printf("can not open file to write the intermediate reconstructed image \n"); printf("%s\n",filename); exit(0); } fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp); fclose(fp); strcpy(filename,directory); sprintf(fn,"%s/object_func_%s_view_%d_(%.0f,%.0f)_TV_%.2f_Lip_%.2f.bin",outputFolder, objectName, Nviews, initialAngle, endAngle, lambda_TV, Lip_con); strcat(filename,fn); if ( (fp = fopen(filename,"wb")) == NULL ) { printf("can not open file to write the tv_value_file \n"); printf("%s\n",filename); exit(0); } fwrite(object_function_array,sizeof(double)*k*3,1,fp); fclose(fp); if (k%5==1) { strcpy(filename,directory); sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_iterative_%d_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, k, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con); strcat(filename,fn); if ( (fp = fopen(filename,"wb")) == NULL ) { printf("can not open file to write the reconstructed image \n"); printf("%s\n",filename); exit(0); } fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp); fclose(fp); } // Note: F[i,j,k] = F [k*M*N+j*M+i]; i:row index; j:column index; k:layer index std::cout<<"Have done "<< k <<" iteration(s)"<<std::endl<<endl; } // End timer gettimeofday(&t2,NULL); printf("Whole computing (gettimeofday): %f (s)\n\n\n", (t2.tv_sec-t1.tv_sec + (t2.tv_usec-t1.tv_usec)/1000000.0)); endProgram: ; hipFree(d_proj_data); hipFree(d_recon); hipHostFree(F_recon); delete []F_Y_k; delete []F_X_k_1; delete []h_proj_forward; delete []h_proj_measured; delete []object_function_array; return 0; }
fada2c6df19fcc814954d1bbc2f519a61ae9b2f4.cu
/* Before you use this version, double check the GPU memory capacity, * Typically, we need GPU to able to take the size of proj_data_size*2 + volume_size*8. * Otherwise you have to choose old versions, or try to modify this version to use CPU computing/storage as much as possible **/ #include <cmath> #include <fstream> #include <time.h> #include <iostream> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <assert.h> #define Default_GPU 0 #define Number_of_Devices 1 #define FBCT 0 // 0: CBCT; 1: multiple layer FBCT #define DEBUG 1 using namespace std; // Mode selection const int FISTA = 1; // -2: execution configuration test // -1: adjoint operator check // 0: SART + TV // 1: SART + TV + FISTA const float lambda_TV = 0.00f; //regularization parameter for the tv norm const float lambda_L1 = 0.00f; //regularization parameter for the l1 norm const int Iter_num = 20; const float Lip_con = 32.0f; // Distances const float DSO = 1.0f; const float DOD = -1.0f; // Reconstructed volume properties const int M = 512; // reconstruction volume x range const int N = 512; // reconstruction volume y range const int ZETA = 512; // reconstruction volume z range const int NO_X = M; const int NO_Y = N; const int NO_Z = ZETA; const int NO_VOXEL = M*N*ZETA; const float volumn_x = 1e-4 ; // (in meter) const float inv_volumn_x = 1.0/volumn_x; const int M_Offset = 0; // for half detector use const float boundary_voxel_x = -volumn_x*(float(M)/2.0f+M_Offset); const float volumn_y = volumn_x ; const float inv_volumn_y = 1.0/volumn_y; const float boundary_voxel_y = -volumn_y*(float(N)/2.0f); const float volumn_z = volumn_x ; const float inv_volumn_z = 1.0/volumn_z; const float boundary_voxel_z = -volumn_z*(float(ZETA)/2.0f); // parameters for half detector offset const float Offset = 0; // Source properties const float Source_x = DSO; const float Source_y = Offset; const float Source_z = 0; // Projection properties const int R = 1024; // detector width const int Z_prj = 1024; // detector height // Note: for FBCT, Z_prj = ZETA const float Detector_pixel_x = 1.2e-4; const float Detector_Ymin = -Detector_pixel_x*(float(R)/2.0f - 0.5f) + Offset; const float Detector_Zmin = -Detector_pixel_x*(float(Z_prj)/2.0f - 0.5f); const float PI = 3.141592653589793f; // acquisition parameters const int Nviews = 220; const float us_rate = 1.00f; const float initialAngle= 0.00f ; const float shiftAngle= 0.0f; const float MAX_infi = 1e16; const int DenoiseOption = 4; #include "InitGPU.h" #include "kernel_tool_functions.cu" #include "host_tool_functions.cu" /* If you want to use the code in which the backprojection is implemented in pixel-driven, * please uncomment the follwing two files and comment out the counterparts */ // #include "pixel_driven_backprj/kernel_IterativeRecon_CBCT.cu" // #include "pixel_driven_backprj/host_IterativeRecon_CBCT.c" // #if FBCT==1 // #include "kernel_IterativeRecon_FBCT.cu" // #else // #include "kernel_IterativeRecon_CBCT.cu" // #endif #include "kernel_IterativeRecon_universal.cu" //This version intergrate both CBCT and FBCT; #include "kernel_IterativeRecon_universal_multiGPU.cu" // Always be inlcuded // #include "host_IterativeRecon_CBCT.c" #include "host_IterativeRecon_CBCT_multiGPU.c" #include "host_FGP_Denoise_CPU.h" #include "kernel_FGP_Denoise_GPUx4.cu" #include "host_FPG_Denoise_GPUx4.c" #include "kernel_FGP_Denoise_GPUx7.cu" #include "host_FGP_Denoise_GPUx7.cu" main(int argc, char ** argv) { // print CUDA information if 
(!InitCUDA()) { return 0; } /* ************* User defined parameters ************/ char directory[]="/home/huifeng/CUDA_multiGPU/CBCT/"; char objectName[]="SLPhantom2"; char outputFolder[]="/Recon_Phantom_512/"; int Niter_denoise = 20; //iter number for denoising problem /*********** other declared variables ************/ float step_size = 2.0f/Lip_con; float lambda_denoise_TV = 2.0f*lambda_TV/Lip_con; double data_fidelity = 0.0f; double tv_value = 0.0f; double object_function_value_xk; double *object_function_array = new double [Iter_num*3]; bzero(object_function_array, sizeof(double)*Iter_num*3); float t_k; float t_k_1=1.0f; FILE *fp; char filename[200]; char fn[200]; float endAngle = initialAngle + (Nviews - 1)*us_rate; /**************** CPU memory allocation *****************/ // for 3D reconstructed volume float *F_Y_k = new float [M*N*ZETA]; // Y(k) bzero(F_Y_k, sizeof(float)*M*N*ZETA); float *F_X_k_1 = new float [M*N*ZETA]; // X(k-1) bzero(F_X_k_1, sizeof(float)*M*N*ZETA); float *F_recon; std::cout << "Testing ... " << std::endl; checkCuda(cudaSetDevice(Default_GPU)); checkCuda( cudaMallocHost((void**)&F_recon, sizeof(float)*M*N*ZETA) ); // host pinned memory // for 2D projection dataset float *h_proj_forward = new float [R*Z_prj*Nviews]; bzero(h_proj_forward, sizeof(float)*R*Z_prj*Nviews); float *h_proj_measured = new float [R*Z_prj*Nviews]; bzero(h_proj_measured, sizeof(float)*R*Z_prj*Nviews); /**************** GPU memory allocation *****************/ size_t d_proj_data_size = sizeof(float)*R*Z_prj*Nviews; size_t d_volume_size = sizeof(float)*M*N*ZETA; // allocate GPU memory for the whole measurement data float *d_proj_data = NULL; cudaMalloc((void**)&d_proj_data, d_proj_data_size); cudaMemcpy(d_proj_data, h_proj_measured, d_proj_data_size, cudaMemcpyHostToDevice); // allocate GPU memory for the recon volume float *d_recon = NULL; cudaMalloc((void**)&d_recon, d_volume_size); cudaMemset(d_recon, 0, d_volume_size); /********** Read Projections **************/ // printf("Read projection files ...\n"); // // for (int j=0;j<Nviews;j++) // { // fileAngle = float(j*us_rate + initialAngle); // if ((CT_TOMO == 1) && (j>=(Nviews/2))) // { // fileAngle = 180+ (j-Nviews/2)*us_rate + initialAngle; // } // if (fileAngle < 0) // fileAngle = fileAngle + 360; // // strcpy(filename,directory); // sprintf(fn,"/AnalyticalForwardProjection/CBCT_spheres_Projections/phi_%.02f.proj", fileAngle); // strcat(filename,fn); // cout<<fn<<endl; // if ( (fp = fopen(filename,"rb")) == NULL ) // { // printf("can not open projection files for main function \n"); // printf("%s\n",filename); // exit(0); // } // // fseek(fp,sizeof(float)*R*(int(2048/2-Z_prj/2)),0); // If you want to read part of the projections // fread(h_proj_measured + j*Z_prj*R, sizeof(float)*Z_prj*R,1,fp); // fclose(fp); // } /********** Inverse Crime study **************/ // load volumetric image strcpy(filename,directory); sprintf(fn,"SLphantom3d_512.dat"); strcat(filename,fn); cout<<"Loading "<<fn<<endl; if ( (fp = fopen(filename,"rb")) == NULL ) { printf("Can not load volumetric image \n"); printf("%s\n",filename); goto endProgram; } fread(F_recon, sizeof(float)*M*N*ZETA,1,fp); fclose(fp); cout<<"Load Phantom Sucessfully!"<<endl; cudaMemcpy(d_recon, F_recon, d_volume_size, cudaMemcpyHostToDevice); Forward_3D_ray_driven_siddon(d_recon, d_proj_data); // SaveDeviceDataToFile(d_proj_data,R*Z_prj*Nviews,"../GeneratedProjection.dat"); cudaMemset(d_recon, 0, d_volume_size); /********** Load initial guess **************/ // strcpy(filename,directory); 
// sprintf(fn,"ReconTemp.recon"); // strcat(filename,fn); // cout<<"Loading "<<fn<<endl; // if ( (fp = fopen(filename,"rb")) == NULL ) // { // printf("Can not load volumetric image \n"); // printf("%s\n",filename); // exit(0); // } // fread(F_recon, sizeof(float)*M*N*ZETA,1,fp); // fclose(fp); // cout<<"Load Initial Guess Sucessfully!"<<endl; /****************Iteration Reconstruction**************************/ //Set Timer 1 struct timeval t1,t2; gettimeofday(&t1,NULL); for (int k=1;k<=Iter_num;k++) { // if (FISTA==-2) // "occupancy calculator", check the best execution configuration. Refer to the program guide // { // int numBlocks; // Occupancy in terms of active blocks // int blockSize = 128; // int activeWarps; // int maxWarps; // // cudaDeviceProp prop; // cudaGetDeviceProperties(&prop, Default_GPU); // // cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,backprj_ray_driven_3d_kernel,blockSize,0); // activeWarps = numBlocks * blockSize / prop.warpSize; // maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; // std::cout << "Occupancy: " << (double)activeWarps / maxWarps * 100 << "%" << std::endl; // std::cout << "MaxActiveBlocksPerMultiprocessor: " << numBlocks << std::endl; // goto endProgram; // } if (FISTA==-1) { /*Note: You need to first uncomment the phantom loading code to initialize a valid F_recon*/ CheckMatchedJointOperator(F_recon); goto endProgram; } if (FISTA==0) { printf("Undergoing SART updating... relaxation = %f\n", step_size); Reconstruction_3D_ray_driven_CBCT(d_recon, d_proj_data, step_size); // SaveDeviceDataToFile(d_recon,M*N*ZETA,"../Recon.dat"); if (lambda_TV>0.0f) { printf("Undergoing TV regularization ...\n"); switch(DenoiseOption) // Denoise options { case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x7 volume size, fast case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slowest case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slower case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, fast, slow in convergence } std::cout<<"TV regularization finished.\n"; } cudaMemcpy(F_recon, d_recon, d_volume_size, cudaMemcpyDeviceToHost); } if (FISTA==1) { printf("Undergoing SART updating... 
relaxation = %f\n", step_size); cudaMemcpy(d_recon, F_Y_k, d_volume_size, cudaMemcpyHostToDevice); Reconstruction_3D_ray_driven_CBCT(d_recon, d_proj_data, step_size); if (lambda_TV>0.0f) { printf("Undergoing TV regularization ...\n"); switch(DenoiseOption) // Denoise options { case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x7 volume size, fast case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slowest case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, slower case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise); break; // require x4 volume size, fast, slow in convergence } std::cout<<"TV regularization finished.\n"; } cudaMemcpy(F_recon, d_recon, d_volume_size, cudaMemcpyDeviceToHost); t_k = (1.0f + sqrt(1.0f + 4.0f*t_k_1*t_k_1) )/2.0f; // Note: t(k) = [1+sqrt(1+4*t(k-1)^2)]/2 for (int i=0;i<NO_VOXEL;i++) F_Y_k[i] = F_recon[i] + (t_k_1 -1.0f)/t_k * (F_recon[i] - F_X_k_1[i]); // Note: y(k) = x(k) + [t(k-1) -1]/t(k) * [x(k)-x(k-1)] t_k_1 = t_k; // Note: Update t(k-1): t(k-1) <- t(k) memcpy(F_X_k_1,F_recon,sizeof(float)*M*N*ZETA); // Note: Update x(k-1): x(k-1) <- x(k) } /*****************Calculating Obj Func Value ********************/ std::cout<<"Calculating Object Func Value ...\n"; //Note: object function value || Ax - b ||_2 + 2*lambda_TV*||f||_tvnorm + lambda_L1*||\phi f ||_L1 ; /*** data fidelity ****/ std::cout<<" - calculating data fidelity ... \n"; float *d_proj_forward = NULL; cudaMalloc((void**)&d_proj_forward, d_proj_data_size); cudaMemset(d_proj_forward, 0, d_proj_data_size); Forward_3D_ray_driven_siddon(d_recon, d_proj_forward); data_fidelity = L2_norm_gpu(d_proj_forward, d_proj_data); std::cout<<" * L2 Norm="<<data_fidelity<<endl; cudaFree(d_proj_forward); /*** TV norm ****/ std::cout<<" - calculating TV norm ... 
\n"; tv_value = TV_norm_gpu(d_recon); std::cout<<" * TV value="<<tv_value<<endl; /***** obj function ******/ object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value; //Note: object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value + 1.0f*lambda_L1*l1_value; object_function_array[k*3-3] = tv_value; object_function_array[k*3-2] = data_fidelity; object_function_array[k*3-1] = object_function_value_xk; std::cout<<"Object function value for x(k) = "<< tv_value <<" + "<< data_fidelity <<" = "<<object_function_value_xk <<std::endl; /***************** Saving ********************/ strcpy(filename,directory); sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_new_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con); strcat(filename,fn); if ( (fp = fopen(filename,"wb")) == NULL ) { printf("can not open file to write the intermediate reconstructed image \n"); printf("%s\n",filename); exit(0); } fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp); fclose(fp); strcpy(filename,directory); sprintf(fn,"%s/object_func_%s_view_%d_(%.0f,%.0f)_TV_%.2f_Lip_%.2f.bin",outputFolder, objectName, Nviews, initialAngle, endAngle, lambda_TV, Lip_con); strcat(filename,fn); if ( (fp = fopen(filename,"wb")) == NULL ) { printf("can not open file to write the tv_value_file \n"); printf("%s\n",filename); exit(0); } fwrite(object_function_array,sizeof(double)*k*3,1,fp); fclose(fp); if (k%5==1) { strcpy(filename,directory); sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_iterative_%d_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, k, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con); strcat(filename,fn); if ( (fp = fopen(filename,"wb")) == NULL ) { printf("can not open file to write the reconstructed image \n"); printf("%s\n",filename); exit(0); } fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp); fclose(fp); } // Note: F[i,j,k] = F [k*M*N+j*M+i]; i:row index; j:column index; k:layer index std::cout<<"Have done "<< k <<" iteration(s)"<<std::endl<<endl; } // End timer gettimeofday(&t2,NULL); printf("Whole computing (gettimeofday): %f (s)\n\n\n", (t2.tv_sec-t1.tv_sec + (t2.tv_usec-t1.tv_usec)/1000000.0)); endProgram: ; cudaFree(d_proj_data); cudaFree(d_recon); cudaFreeHost(F_recon); delete []F_Y_k; delete []F_X_k_1; delete []h_proj_forward; delete []h_proj_measured; delete []object_function_array; return 0; }
82129acfefd00cca09d844facd3c163357ed4bef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fmt/format.h> #include <iostream> #include "omp.h" #include<map> #include <roctracer/roctx.h> #include "clara/clara.hpp" #include "pangolin/pangolin.cuh" #include "pangolin/pangolin.hpp" #include "pangolin/algorithm/zero.cuh" #include "pangolin/algorithm/ktruss_incremental_multiGPU.cuh" #define UT uint32_t int getMaxK(std::map<UT, int> degree) { typedef std::map<UT, int>::reverse_iterator it_type; int maxK = 0; int reverseCount = 0; bool getNext = false; for (it_type m = degree.rbegin(); m != degree.rend(); m++) { int degree = m->first; int proposedKmax = degree + 1; reverseCount += m->second; if (reverseCount >= proposedKmax) { maxK = proposedKmax; break; } } return maxK; } int main(int argc, char **argv) { pangolin::init(); std::vector<int> gpus; std::string path; int iters = 1; bool help = false; bool debug = false; bool verbose = false; bool readMostly = false; bool accessedBy = false; bool prefetchAsync = false; clara::Parser cli; cli = cli | clara::Help(help); cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr"); cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr"); cli = cli | clara::Opt(gpus, "ids")["-g"]("gpus to use"); cli = cli | clara::Opt(readMostly)["--read-mostly"]( "mark data as read-mostly by all gpus before kernel"); cli = cli | clara::Opt(accessedBy)["--accessed-by"]( "mark data as accessed-by all GPUs before kernel"); cli = cli | clara::Opt(prefetchAsync)["--prefetch-async"]( "prefetch data to all GPUs before kernel"); cli = cli | clara::Opt(iters, "N")["-n"]("number of counts"); cli = cli | clara::Arg(path, "graph file")("Path to adjacency list").required(); auto result = cli.parse(clara::Args(argc, argv)); if (!result) { LOG(error, "Error in command line: {}", result.errorMessage()); exit(1); } if (help) { std::cout << cli; return 0; } // set logging level if (verbose) { pangolin::logger::set_level(pangolin::logger::Level::TRACE); } else if (debug) { pangolin::logger::set_level(pangolin::logger::Level::DEBUG); } // log command line before much else happens { std::string cmd; for (int i = 0; i < argc; ++i) { if (i != 0) { cmd += " "; } cmd += argv[i]; } LOG(debug, cmd); } LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH); LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC); LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH); LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES); #ifndef NDEBUG LOG(warn, "Not a release build"); #endif if (gpus.empty()) { LOG(warn, "no GPUs provided on command line, using GPU 0"); gpus.push_back(0); } // read data auto start = std::chrono::system_clock::now(); pangolin::EdgeListFile file(path); std::vector<pangolin::DiEdge<UT>> edges; std::vector<pangolin::DiEdge<UT>> fileEdges; while (file.get_edges(fileEdges, 10)) { edges.insert(edges.end(), fileEdges.begin(), fileEdges.end()); } double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "read_data time {}s", elapsed); LOG(debug, "read {} edges", edges.size()); // create csr and count `iters` times std::vector<double> times; uint64_t nnz; // create csr start = std::chrono::system_clock::now(); auto upperTriangular = [](pangolin::DiEdge<UT> e) { return true; //e.first < e.second; }; auto csr = pangolin::CSRCOO<UT>::from_edges(edges.begin(), edges.end(), upperTriangular); LOG(debug, "nnz = {}", csr.nnz()); elapsed = 
(std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "create CSR time {}s", elapsed); edges.clear(); edges.shrink_to_fit(); fileEdges.clear(); fileEdges.shrink_to_fit(); // accessed-by roctxRangePush("accessed-by"); start = std::chrono::system_clock::now(); if (accessedBy) { for (const auto &gpu : gpus) { csr.accessed_by(gpu); CUDA_RUNTIME(hipSetDevice(gpu)); CUDA_RUNTIME(hipDeviceSynchronize()); } } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; roctxRangePop(); LOG(info, "accessed-by CSR time {}s", elapsed); // prefetch roctxRangePush("prefetch"); start = std::chrono::system_clock::now(); if (prefetchAsync) { for (const auto &gpu : gpus) { csr.prefetch_async(gpu); CUDA_RUNTIME(hipSetDevice(gpu)); CUDA_RUNTIME(hipDeviceSynchronize()); } } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; roctxRangePop(); LOG(info, "prefetch CSR time {}s", elapsed); // count triangles roctxRangePush("count"); //start = std::chrono::system_clock::now(); { start = std::chrono::system_clock::now(); UT *rowPtr = csr.rowPtr_.data(); UT *rowInd = csr.rowInd_.data(); UT *colInd = csr.colInd_.data(); UT numEdges = csr.nnz(); int numGpus = gpus.size(); int numNodes = csr.num_nodes(); UT edgesPerGPU = (numEdges + numGpus - 1) / numGpus; UT *uSrcKp, *uDstKp, *uReversed; printf("GPus=%d, NNZ=%u, nr=%u\n", numGpus, numEdges, csr.num_rows()); CUDA_RUNTIME(hipMallocManaged((void **) &uSrcKp, numEdges*sizeof(UT))); CUDA_RUNTIME(hipMallocManaged((void **) &uDstKp, numEdges*sizeof(UT))); CUDA_RUNTIME(hipMallocManaged((void **) &uReversed, numEdges*sizeof(UT))); // CUDA_RUNTIME(hipMemAdvise(rowInd, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */)); // CUDA_RUNTIME(hipMemAdvise(colInd, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */)); // CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */)); // create async counters std::vector<pangolin::MultiGPU_Ktruss_Incremental> trussCounters; for (int dev : gpus) { LOG(info, "create device {} counter", dev); auto counter = pangolin::MultiGPU_Ktruss_Incremental(numEdges, dev); counter.CreateWorkspace(numEdges); trussCounters.push_back(counter); counter.InitializeWorkSpace_async(numEdges); } UT edgeStart = 0; for (auto &counter : trussCounters) { counter.selectedOut[0] = numEdges; const size_t edgeStop = ::min(edgeStart + edgesPerGPU, numEdges); const size_t edgesToProcess = edgeStop - edgeStart; counter.Inialize_Unified_async(edgeStart, edgesToProcess, rowPtr, rowInd, colInd, uSrcKp, uReversed); edgeStart += edgesPerGPU; } UT *ptrSrc, *ptrDst; UT *s1, *d1, *s2, *d2; s1 = rowInd; d1 = colInd; s2 = uSrcKp; d2 = uDstKp; ptrSrc = s1; ptrDst = d1; int kmin = 3; int kmax=-1; constexpr int dimBlock = 32; //For edges and nodes int dimGridEdges = (numEdges + dimBlock - 1) / dimBlock; if(numGpus > 1) { // CUDA_RUNTIME(hipMemAdvise(uReversed, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 /* ignored */)); } while(true) { LOG(info, "kmin={}, remaining edges={}", kmin, numEdges); bool firstTry = true; for (auto &counter : trussCounters) { counter.setDevice(); counter.hnumaffected[0] = 1; CUDA_RUNTIME(hipMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream())); } bool assumpAffected = true; while(assumpAffected) { assumpAffected = false; for (int i=0; i<numGpus;i++) { auto& counter = trussCounters[i]; counter.setDevice(); if(counter.hnumaffected[0]>0) { hipLaunchKernelGGL(( core_full_direct<dimBlock>), 
dim3(dimGridEdges),dim3(dimBlock),0,counter.stream(), counter.gnumdeleted, counter.gnumaffected, kmin+i, 0, numEdges, rowPtr, ptrSrc, ptrDst, counter.gKeep, counter.gAffected, uReversed, firstTry, 1); //Copy to host CUDA_RUNTIME(hipMemcpyAsync(counter.hnumaffected, counter.gnumaffected, sizeof(UT), hipMemcpyDeviceToHost, counter.stream())); CUDA_RUNTIME(hipMemcpyAsync(counter.hnumdeleted, counter.gnumdeleted, sizeof(UT), hipMemcpyDeviceToHost, counter.stream())); //Set gpu data to zeros CUDA_RUNTIME(hipMemsetAsync(counter.gnumdeleted,0,sizeof(UT),counter.stream())); CUDA_RUNTIME(hipMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream())); } } for (int i=0; i<numGpus;i++) { auto& counter = trussCounters[i]; counter.setDevice(); counter.sync(); assumpAffected = assumpAffected || (counter.hnumaffected[0]>0); counter.percentage_deleted_k = (counter.hnumdeleted[0])*1.0/numEdges; } firstTry = false; } bool foundKmax = false; int fallBackGPU = -1; for (int i=0; i<numGpus;i++) { auto& counter = trussCounters[i]; counter.setDevice(); if(numEdges - counter.hnumdeleted[0] > 0) { kmax = kmin + i; fallBackGPU = i; } else { foundKmax = true; break; } } kmin += numGpus; int counter = 0; if(!foundKmax) { auto& c = trussCounters[fallBackGPU]; float percDeleted = (c.hnumdeleted[0])*1.0/numEdges; if(percDeleted > 0.1) { //each gpu stores latest keep c.setDevice(); void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream()); CUDA_RUNTIME(hipMallocManaged(&d_temp_storage, temp_storage_bytes)); cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream()); cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d1, c.gKeep, d2, c.selectedOut, numEdges, c.stream()); CUDA_RUNTIME(hipFree(d_temp_storage)); // CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(hipMemAdvise(uReversed, numEdges * sizeof(UT), hipMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(hipMemAdvise(s1, numEdges * sizeof(UT), hipMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(hipMemAdvise(d1, numEdges * sizeof(UT), hipMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(hipMemAdvise(s2, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 )); // CUDA_RUNTIME(hipMemAdvise(d2, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 )); numEdges = c.selectedOut[0]; edgesPerGPU = (numEdges + numGpus - 1) / numGpus; dimGridEdges = (numEdges + dimBlock - 1) / dimBlock; ptrSrc = s2; s2 = s1; s1 = ptrSrc; ptrDst = d2; d2 = d1; d1 = ptrDst; c.setDevice(); hipLaunchKernelGGL(( RebuildArrays<dimBlock>), dim3(dimGridEdges),dim3(dimBlock),0,c.stream(), 0, numEdges, numEdges, rowPtr, ptrSrc); hipLaunchKernelGGL(( RebuildReverse<dimBlock>), dim3(dimGridEdges),dim3(dimBlock),0,c.stream(), 0, numEdges, rowPtr, ptrSrc, ptrDst, uReversed); for (auto &counter : trussCounters) { counter.sync(); } /*roctxRangePush("Stream Compaction 6 out 12 hints time"); start = std::chrono::system_clock::now();*/ for (auto &counter : trussCounters) { counter.setDevice(); counter.InitializeWorkSpace_async(numEdges); } // CUDA_RUNTIME(hipMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), hipMemAdviseSetReadMostly, 0 )); // CUDA_RUNTIME(hipMemAdvise(uReversed, numEdges * sizeof(UT), hipMemAdviseSetReadMostly, 0 )); for (auto &counter : trussCounters) { counter.sync(); } /*elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; 
roctxRangePop(); LOG(info, "Stream Compaction 6 out 12 hints time {}s", elapsed);*/ } } else{ break; } } //printf("New Kmin = %d, New Kmax=%d\n", newKmin, newKmax); for (auto &counter : trussCounters) counter.free(); hipFree(uSrcKp); hipFree(uDstKp); hipFree(uReversed); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; roctxRangePop(); LOG(info, "count time {}s", elapsed); LOG(info, "MOHA {} ktruss ({} teps)", kmax, csr.nnz() / elapsed); times.push_back(elapsed); //tris = total; nnz = csr.nnz(); //std::cout << path << ",\t" << nnz << ",\t" << tris; for (const auto &t : times) { std::cout << ",\t" << t; } std::cout << std::endl; } return 0; }
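// A minimal standalone sketch of the two-phase cub::DevicePartition::Flagged pattern the
// k-truss code above uses for edge compaction, shown on a tiny hypothetical array. The
// first call only computes the required temporary-storage size (d_temp == NULL); the
// second call performs the partition, moving flagged items to the front of the output.
#include <cstdio>
#include <cuda_runtime.h>
#include <cub/cub.cuh>

int main()
{
    const int n = 8;
    unsigned int  h_in[n]   = {0, 1, 2, 3, 4, 5, 6, 7};
    unsigned char h_flag[n] = {1, 0, 1, 0, 1, 0, 1, 0};   // keep the even entries

    unsigned int *d_in, *d_out; unsigned char *d_flag; int *d_num_selected;
    cudaMalloc(&d_in,  n * sizeof(unsigned int));
    cudaMalloc(&d_out, n * sizeof(unsigned int));
    cudaMalloc(&d_flag, n * sizeof(unsigned char));
    cudaMalloc(&d_num_selected, sizeof(int));
    cudaMemcpy(d_in,   h_in,   n * sizeof(unsigned int),  cudaMemcpyHostToDevice);
    cudaMemcpy(d_flag, h_flag, n * sizeof(unsigned char), cudaMemcpyHostToDevice);

    // Pass 1: query the temporary storage size.
    void *d_temp = NULL; size_t temp_bytes = 0;
    cub::DevicePartition::Flagged(d_temp, temp_bytes, d_in, d_flag, d_out, d_num_selected, n);
    cudaMalloc(&d_temp, temp_bytes);

    // Pass 2: flagged items go to the front of d_out, the rest to the back (in reverse order).
    cub::DevicePartition::Flagged(d_temp, temp_bytes, d_in, d_flag, d_out, d_num_selected, n);

    int h_num = 0;
    cudaMemcpy(&h_num, d_num_selected, sizeof(int), cudaMemcpyDeviceToHost);
    printf("kept %d of %d items\n", h_num, n);            // prints: kept 4 of 8 items

    cudaFree(d_temp); cudaFree(d_in); cudaFree(d_out); cudaFree(d_flag); cudaFree(d_num_selected);
    return 0;
}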
82129acfefd00cca09d844facd3c163357ed4bef.cu
#include <fmt/format.h> #include <iostream> #include "omp.h" #include<map> #include <nvToolsExt.h> #include "clara/clara.hpp" #include "pangolin/pangolin.cuh" #include "pangolin/pangolin.hpp" #include "pangolin/algorithm/zero.cuh" #include "pangolin/algorithm/ktruss_incremental_multiGPU.cuh" #define UT uint32_t int getMaxK(std::map<UT, int> degree) { typedef std::map<UT, int>::reverse_iterator it_type; int maxK = 0; int reverseCount = 0; bool getNext = false; for (it_type m = degree.rbegin(); m != degree.rend(); m++) { int degree = m->first; int proposedKmax = degree + 1; reverseCount += m->second; if (reverseCount >= proposedKmax) { maxK = proposedKmax; break; } } return maxK; } int main(int argc, char **argv) { pangolin::init(); std::vector<int> gpus; std::string path; int iters = 1; bool help = false; bool debug = false; bool verbose = false; bool readMostly = false; bool accessedBy = false; bool prefetchAsync = false; clara::Parser cli; cli = cli | clara::Help(help); cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr"); cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr"); cli = cli | clara::Opt(gpus, "ids")["-g"]("gpus to use"); cli = cli | clara::Opt(readMostly)["--read-mostly"]( "mark data as read-mostly by all gpus before kernel"); cli = cli | clara::Opt(accessedBy)["--accessed-by"]( "mark data as accessed-by all GPUs before kernel"); cli = cli | clara::Opt(prefetchAsync)["--prefetch-async"]( "prefetch data to all GPUs before kernel"); cli = cli | clara::Opt(iters, "N")["-n"]("number of counts"); cli = cli | clara::Arg(path, "graph file")("Path to adjacency list").required(); auto result = cli.parse(clara::Args(argc, argv)); if (!result) { LOG(error, "Error in command line: {}", result.errorMessage()); exit(1); } if (help) { std::cout << cli; return 0; } // set logging level if (verbose) { pangolin::logger::set_level(pangolin::logger::Level::TRACE); } else if (debug) { pangolin::logger::set_level(pangolin::logger::Level::DEBUG); } // log command line before much else happens { std::string cmd; for (int i = 0; i < argc; ++i) { if (i != 0) { cmd += " "; } cmd += argv[i]; } LOG(debug, cmd); } LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH); LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC); LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH); LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES); #ifndef NDEBUG LOG(warn, "Not a release build"); #endif if (gpus.empty()) { LOG(warn, "no GPUs provided on command line, using GPU 0"); gpus.push_back(0); } // read data auto start = std::chrono::system_clock::now(); pangolin::EdgeListFile file(path); std::vector<pangolin::DiEdge<UT>> edges; std::vector<pangolin::DiEdge<UT>> fileEdges; while (file.get_edges(fileEdges, 10)) { edges.insert(edges.end(), fileEdges.begin(), fileEdges.end()); } double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "read_data time {}s", elapsed); LOG(debug, "read {} edges", edges.size()); // create csr and count `iters` times std::vector<double> times; uint64_t nnz; // create csr start = std::chrono::system_clock::now(); auto upperTriangular = [](pangolin::DiEdge<UT> e) { return true; //e.first < e.second; }; auto csr = pangolin::CSRCOO<UT>::from_edges(edges.begin(), edges.end(), upperTriangular); LOG(debug, "nnz = {}", csr.nnz()); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "create CSR time {}s", elapsed); edges.clear(); 
edges.shrink_to_fit(); fileEdges.clear(); fileEdges.shrink_to_fit(); // accessed-by nvtxRangePush("accessed-by"); start = std::chrono::system_clock::now(); if (accessedBy) { for (const auto &gpu : gpus) { csr.accessed_by(gpu); CUDA_RUNTIME(cudaSetDevice(gpu)); CUDA_RUNTIME(cudaDeviceSynchronize()); } } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; nvtxRangePop(); LOG(info, "accessed-by CSR time {}s", elapsed); // prefetch nvtxRangePush("prefetch"); start = std::chrono::system_clock::now(); if (prefetchAsync) { for (const auto &gpu : gpus) { csr.prefetch_async(gpu); CUDA_RUNTIME(cudaSetDevice(gpu)); CUDA_RUNTIME(cudaDeviceSynchronize()); } } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; nvtxRangePop(); LOG(info, "prefetch CSR time {}s", elapsed); // count triangles nvtxRangePush("count"); //start = std::chrono::system_clock::now(); { start = std::chrono::system_clock::now(); UT *rowPtr = csr.rowPtr_.data(); UT *rowInd = csr.rowInd_.data(); UT *colInd = csr.colInd_.data(); UT numEdges = csr.nnz(); int numGpus = gpus.size(); int numNodes = csr.num_nodes(); UT edgesPerGPU = (numEdges + numGpus - 1) / numGpus; UT *uSrcKp, *uDstKp, *uReversed; printf("GPus=%d, NNZ=%u, nr=%u\n", numGpus, numEdges, csr.num_rows()); CUDA_RUNTIME(cudaMallocManaged((void **) &uSrcKp, numEdges*sizeof(UT))); CUDA_RUNTIME(cudaMallocManaged((void **) &uDstKp, numEdges*sizeof(UT))); CUDA_RUNTIME(cudaMallocManaged((void **) &uReversed, numEdges*sizeof(UT))); // CUDA_RUNTIME(cudaMemAdvise(rowInd, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */)); // CUDA_RUNTIME(cudaMemAdvise(colInd, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */)); // CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */)); // create async counters std::vector<pangolin::MultiGPU_Ktruss_Incremental> trussCounters; for (int dev : gpus) { LOG(info, "create device {} counter", dev); auto counter = pangolin::MultiGPU_Ktruss_Incremental(numEdges, dev); counter.CreateWorkspace(numEdges); trussCounters.push_back(counter); counter.InitializeWorkSpace_async(numEdges); } UT edgeStart = 0; for (auto &counter : trussCounters) { counter.selectedOut[0] = numEdges; const size_t edgeStop = std::min(edgeStart + edgesPerGPU, numEdges); const size_t edgesToProcess = edgeStop - edgeStart; counter.Inialize_Unified_async(edgeStart, edgesToProcess, rowPtr, rowInd, colInd, uSrcKp, uReversed); edgeStart += edgesPerGPU; } UT *ptrSrc, *ptrDst; UT *s1, *d1, *s2, *d2; s1 = rowInd; d1 = colInd; s2 = uSrcKp; d2 = uDstKp; ptrSrc = s1; ptrDst = d1; int kmin = 3; int kmax=-1; constexpr int dimBlock = 32; //For edges and nodes int dimGridEdges = (numEdges + dimBlock - 1) / dimBlock; if(numGpus > 1) { // CUDA_RUNTIME(cudaMemAdvise(uReversed, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 /* ignored */)); } while(true) { LOG(info, "kmin={}, remaining edges={}", kmin, numEdges); bool firstTry = true; for (auto &counter : trussCounters) { counter.setDevice(); counter.hnumaffected[0] = 1; CUDA_RUNTIME(cudaMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream())); } bool assumpAffected = true; while(assumpAffected) { assumpAffected = false; for (int i=0; i<numGpus;i++) { auto& counter = trussCounters[i]; counter.setDevice(); if(counter.hnumaffected[0]>0) { core_full_direct<dimBlock><<<dimGridEdges,dimBlock,0,counter.stream()>>>(counter.gnumdeleted, counter.gnumaffected, kmin+i, 0, numEdges, rowPtr, ptrSrc, ptrDst, counter.gKeep, counter.gAffected, 
uReversed, firstTry, 1); //Copy to host CUDA_RUNTIME(cudaMemcpyAsync(counter.hnumaffected, counter.gnumaffected, sizeof(UT), cudaMemcpyDeviceToHost, counter.stream())); CUDA_RUNTIME(cudaMemcpyAsync(counter.hnumdeleted, counter.gnumdeleted, sizeof(UT), cudaMemcpyDeviceToHost, counter.stream())); //Set gpu data to zeros CUDA_RUNTIME(cudaMemsetAsync(counter.gnumdeleted,0,sizeof(UT),counter.stream())); CUDA_RUNTIME(cudaMemsetAsync(counter.gnumaffected,0,sizeof(UT),counter.stream())); } } for (int i=0; i<numGpus;i++) { auto& counter = trussCounters[i]; counter.setDevice(); counter.sync(); assumpAffected = assumpAffected || (counter.hnumaffected[0]>0); counter.percentage_deleted_k = (counter.hnumdeleted[0])*1.0/numEdges; } firstTry = false; } bool foundKmax = false; int fallBackGPU = -1; for (int i=0; i<numGpus;i++) { auto& counter = trussCounters[i]; counter.setDevice(); if(numEdges - counter.hnumdeleted[0] > 0) { kmax = kmin + i; fallBackGPU = i; } else { foundKmax = true; break; } } kmin += numGpus; int counter = 0; if(!foundKmax) { auto& c = trussCounters[fallBackGPU]; float percDeleted = (c.hnumdeleted[0])*1.0/numEdges; if(percDeleted > 0.1) { //each gpu stores latest keep c.setDevice(); void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream()); CUDA_RUNTIME(cudaMallocManaged(&d_temp_storage, temp_storage_bytes)); cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, s1, c.gKeep, s2, c.selectedOut, numEdges, c.stream()); cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d1, c.gKeep, d2, c.selectedOut, numEdges, c.stream()); CUDA_RUNTIME(cudaFree(d_temp_storage)); // CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(cudaMemAdvise(uReversed, numEdges * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(cudaMemAdvise(s1, numEdges * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(cudaMemAdvise(d1, numEdges * sizeof(UT), cudaMemAdviseUnsetReadMostly, 0)); // CUDA_RUNTIME(cudaMemAdvise(s2, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 )); // CUDA_RUNTIME(cudaMemAdvise(d2, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 )); numEdges = c.selectedOut[0]; edgesPerGPU = (numEdges + numGpus - 1) / numGpus; dimGridEdges = (numEdges + dimBlock - 1) / dimBlock; ptrSrc = s2; s2 = s1; s1 = ptrSrc; ptrDst = d2; d2 = d1; d1 = ptrDst; c.setDevice(); RebuildArrays<dimBlock><<<dimGridEdges,dimBlock,0,c.stream()>>>(0, numEdges, numEdges, rowPtr, ptrSrc); RebuildReverse<dimBlock><<<dimGridEdges,dimBlock,0,c.stream()>>>(0, numEdges, rowPtr, ptrSrc, ptrDst, uReversed); for (auto &counter : trussCounters) { counter.sync(); } /*nvtxRangePush("Stream Compaction 6 out 12 hints time"); start = std::chrono::system_clock::now();*/ for (auto &counter : trussCounters) { counter.setDevice(); counter.InitializeWorkSpace_async(numEdges); } // CUDA_RUNTIME(cudaMemAdvise(rowPtr, (numNodes+1) * sizeof(UT), cudaMemAdviseSetReadMostly, 0 )); // CUDA_RUNTIME(cudaMemAdvise(uReversed, numEdges * sizeof(UT), cudaMemAdviseSetReadMostly, 0 )); for (auto &counter : trussCounters) { counter.sync(); } /*elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; nvtxRangePop(); LOG(info, "Stream Compaction 6 out 12 hints time {}s", elapsed);*/ } } else{ break; } } //printf("New Kmin = %d, New Kmax=%d\n", newKmin, newKmax); for (auto &counter : trussCounters) counter.free(); 
cudaFree(uSrcKp); cudaFree(uDstKp); cudaFree(uReversed); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; nvtxRangePop(); LOG(info, "count time {}s", elapsed); LOG(info, "MOHA {} ktruss ({} teps)", kmax, csr.nnz() / elapsed); times.push_back(elapsed); //tris = total; nnz = csr.nnz(); //std::cout << path << ",\t" << nnz << ",\t" << tris; for (const auto &t : times) { std::cout << ",\t" << t; } std::cout << std::endl; } return 0; }
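// A minimal standalone sketch of the unified-memory hint pattern that the commented-out
// cudaMemAdvise lines in the file above gesture at, on a small hypothetical buffer.
// cudaMemAdviseSetReadMostly lets each GPU keep a read-only copy of the pages, and
// cudaMemPrefetchAsync moves them before the kernels touch the data.
#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const size_t n = 1 << 20;
    unsigned int *buf = NULL;
    cudaMallocManaged(&buf, n * sizeof(unsigned int));
    for (size_t i = 0; i < n; i++) buf[i] = (unsigned int)i;   // first touch on the host

    int numGpus = 0;
    cudaGetDeviceCount(&numGpus);

    // Mark as read-mostly (the device argument is ignored for this advice),
    // then prefetch a copy to each GPU that will read it.
    cudaMemAdvise(buf, n * sizeof(unsigned int), cudaMemAdviseSetReadMostly, 0);
    for (int dev = 0; dev < numGpus; dev++)
    {
        cudaSetDevice(dev);
        cudaMemPrefetchAsync(buf, n * sizeof(unsigned int), dev, 0);
        cudaDeviceSynchronize();
    }
    printf("advised and prefetched %zu bytes to %d GPU(s)\n", n * sizeof(unsigned int), numGpus);
    cudaFree(buf);
    return 0;
}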
f321b16f04d7f73cc47c662438b76e5dfe46f301.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define SHAREDSIZE 32 __global__ void MatrixMulKernel(double* A, double* B, double* C, int wA, int wB, int tileSize) { int BLOCK_SIZE = tileSize; // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // The element of the block sub-matrix that is computed // by the thread float Csub = 0; //create the shared memory for two sub-blocks in A and B respectively __shared__ volatile float As[SHAREDSIZE][SHAREDSIZE]; __shared__ volatile float Bs[SHAREDSIZE][SHAREDSIZE]; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the tiles from global memory into shared memory; // each thread loads one element of the two tiles from A & B As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); __threadfence_block(); // Each thread in this block computes one element // of the block sub-matrix (tile). Thread with indexes // ty and tx computes in this tile the entry [ty][tx] . for (int k = 0; k < BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } double* read_array(const char* filename, int len) { double *x = (double*) malloc(len * sizeof(double)); FILE *fp = fopen(filename, "r"); for (int i = 0; i < len; i++) { fscanf(fp, "%lf", &x[i]); } fclose(fp); return x; } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension,int nCols, int tileSize, float* incTime ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { if(argc!=5) { printf("Usage: ./problem3 i j k N\n"); return 0; } int nRows = atoi(argv[1]); int nInnerDimension = atoi(argv[2]); int nCols = atoi(argv[3]); int num_elementsA= nRows*nInnerDimension; int num_elementsB=nInnerDimension*nCols; int num_elementsC= nRows*nCols; int tileSize = atoi(argv[4]); //change this for scaling analysis float incTime=0; // Time for GPU double* hA = read_array("problem3.inp",num_elementsA); double* hB = read_array("problem3.inp",num_elementsB); double* hC = (double*) malloc(num_elementsC * sizeof(double)); // **===-------- Modify the body of this function -----------===** computeOnDevice( hA, hB,hC, nRows, nInnerDimension, nCols, tileSize, &incTime); // **===-----------------------------------------------------------===** //cpu calculation check /*double check = 0.0; for(int i=0;i<nInnerDimension;i++){ check += 
hA[(nRows-1)*nInnerDimension+i]*hB[i*nCols+nCols-1]; } printf("%f\n", check); */ printf("%f\n%f\n%d\n%d\n%d\n",hC[num_elementsC-1],incTime,tileSize,nRows,nCols); // cleanup memory free(hA); free(hB); free(hC); return 0; } //ZeroPadthe matrix so that it could be exactly devided by Tile Size in both row and col double * zeroPadMatrix(double *unpadded, int row, int col, int paddedRow, int paddedCol, int TileSize, int copy) { double *paddedMatrix = (double *)calloc(paddedRow*paddedCol, sizeof(double)); //Copy the values from unpadded matrix to padded matrix if(copy){ for (int i=0;i<row;i++) { memcpy(&paddedMatrix[i*paddedCol], &unpadded[i*col], col*sizeof(double)); } } return paddedMatrix; } void extractPaddedMaxtrix(double *unpadded, double *padded, int row, int col, int paddedRow, int PaddedCol, int TileSize) { for(int i=0;i<row; i++){ memcpy(&unpadded[i*col], &padded[i*PaddedCol], col*sizeof(double)); } } //for debug use void printMatrix(double *matrix, int row, int col) { for(int i=0;i<row;i++){ for(int j=0;j<col;j++){ printf("%f ", matrix[i*col + j]); } printf("\n"); } printf("\n"); } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension, int nCols, int TileSize, float* incTime) { //calculate the size needed for padding int tempRow = (nRows-1)/TileSize + 1; int paddednRows = tempRow*TileSize; int tempnInnerDimension = (nInnerDimension-1)/TileSize + 1; int paddedtempnInnerDimension = tempnInnerDimension*TileSize; int tempCol = (nCols-1)/TileSize + 1; int paddednCols = tempCol*TileSize; //zero paddding double *paddedA = zeroPadMatrix(hA, nRows, nInnerDimension, paddednRows, paddedtempnInnerDimension, TileSize, 1); double *paddedB = zeroPadMatrix(hB, nInnerDimension, nCols, paddedtempnInnerDimension, paddednCols, TileSize, 1); double *paddedC = zeroPadMatrix(hB, nRows, nCols, paddednRows, paddednCols, TileSize, 0); //printMatrix(paddedA, paddednRows, paddedtempnInnerDimension); //printMatrix(paddedB, paddedtempnInnerDimension, paddednCols); //start inclusive timing hipEvent_t startIn,stopIn; hipEventCreate(&startIn); hipEventCreate(&stopIn); hipEventRecord(startIn, 0); //allocate the device memory double *dA, *dB, *dC; hipMalloc((void **)&dA, sizeof(double)*paddednRows*paddedtempnInnerDimension); hipMalloc((void **)&dB, sizeof(double)*paddedtempnInnerDimension*paddednCols); hipMalloc((void **)&dC, sizeof(double)*paddednRows*paddednCols); //copy from host to device hipMemcpy(dA, paddedA, sizeof(double)*paddednRows*paddedtempnInnerDimension, hipMemcpyHostToDevice); hipMemcpy(dB, paddedB, sizeof(double)*paddedtempnInnerDimension*paddednCols, hipMemcpyHostToDevice); dim3 dimGrid(paddednCols/TileSize, paddednRows/TileSize); dim3 dimBlock(TileSize,TileSize); hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dA, dB, dC, paddedtempnInnerDimension, paddednCols, TileSize); hipMemcpy(paddedC, dC, sizeof(double)*paddednRows*paddednCols,hipMemcpyDeviceToHost); extractPaddedMaxtrix(hC, paddedC, nRows, nCols, paddednRows, paddednCols, TileSize); //stop inclusive timing hipEventRecord(stopIn, 0); hipEventSynchronize(stopIn); hipEventElapsedTime(incTime, startIn, stopIn); hipEventDestroy(startIn); hipEventDestroy(stopIn); return;//Placeholder }
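// A minimal standalone sketch of the round-up-to-tile arithmetic used by computeOnDevice()
// above, with a hypothetical size. ((n - 1)/tile + 1) is an integer ceiling division, so
// the padded size is the smallest multiple of tile that is >= n.
#include <cstdio>

int main()
{
    int n = 1000, tile = 32;                    // hypothetical matrix dimension and tile width
    int paddedN = ((n - 1) / tile + 1) * tile;  // ceil(1000/32) = 32  ->  32 * 32 = 1024
    printf("n = %d, tile = %d, padded = %d\n", n, tile, paddedN);
    return 0;
}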
f321b16f04d7f73cc47c662438b76e5dfe46f301.cu
#ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define SHAREDSIZE 32 __global__ void MatrixMulKernel(double* A, double* B, double* C, int wA, int wB, int tileSize) { int BLOCK_SIZE = tileSize; // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // The element of the block sub-matrix that is computed // by the thread float Csub = 0; //create the shared memory for two sub-blocks in A and B respectively __shared__ volatile float As[SHAREDSIZE][SHAREDSIZE]; __shared__ volatile float Bs[SHAREDSIZE][SHAREDSIZE]; for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the tiles from global memory into shared memory; // each thread loads one element of the two tiles from A & B As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); __threadfence_block(); // Each thread in this block computes one element // of the block sub-matrix (tile). Thread with indexes // ty and tx computes in this tile the entry [ty][tx] . for (int k = 0; k < BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } double* read_array(const char* filename, int len) { double *x = (double*) malloc(len * sizeof(double)); FILE *fp = fopen(filename, "r"); for (int i = 0; i < len; i++) { fscanf(fp, "%lf", &x[i]); } fclose(fp); return x; } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension,int nCols, int tileSize, float* incTime ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { if(argc!=5) { printf("Usage: ./problem3 i j k N\n"); return 0; } int nRows = atoi(argv[1]); int nInnerDimension = atoi(argv[2]); int nCols = atoi(argv[3]); int num_elementsA= nRows*nInnerDimension; int num_elementsB=nInnerDimension*nCols; int num_elementsC= nRows*nCols; int tileSize = atoi(argv[4]); //change this for scaling analysis float incTime=0; // Time for GPU double* hA = read_array("problem3.inp",num_elementsA); double* hB = read_array("problem3.inp",num_elementsB); double* hC = (double*) malloc(num_elementsC * sizeof(double)); // **===-------- Modify the body of this function -----------===** computeOnDevice( hA, hB,hC, nRows, nInnerDimension, nCols, tileSize, &incTime); // **===-----------------------------------------------------------===** //cpu calculation check /*double check = 0.0; for(int i=0;i<nInnerDimension;i++){ check += hA[(nRows-1)*nInnerDimension+i]*hB[i*nCols+nCols-1]; } printf("%f\n", check); */ 
printf("%f\n%f\n%d\n%d\n%d\n",hC[num_elementsC-1],incTime,tileSize,nRows,nCols); // cleanup memory free(hA); free(hB); free(hC); return 0; } //ZeroPadthe matrix so that it could be exactly devided by Tile Size in both row and col double * zeroPadMatrix(double *unpadded, int row, int col, int paddedRow, int paddedCol, int TileSize, int copy) { double *paddedMatrix = (double *)calloc(paddedRow*paddedCol, sizeof(double)); //Copy the values from unpadded matrix to padded matrix if(copy){ for (int i=0;i<row;i++) { memcpy(&paddedMatrix[i*paddedCol], &unpadded[i*col], col*sizeof(double)); } } return paddedMatrix; } void extractPaddedMaxtrix(double *unpadded, double *padded, int row, int col, int paddedRow, int PaddedCol, int TileSize) { for(int i=0;i<row; i++){ memcpy(&unpadded[i*col], &padded[i*PaddedCol], col*sizeof(double)); } } //for debug use void printMatrix(double *matrix, int row, int col) { for(int i=0;i<row;i++){ for(int j=0;j<col;j++){ printf("%f ", matrix[i*col + j]); } printf("\n"); } printf("\n"); } void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension, int nCols, int TileSize, float* incTime) { //calculate the size needed for padding int tempRow = (nRows-1)/TileSize + 1; int paddednRows = tempRow*TileSize; int tempnInnerDimension = (nInnerDimension-1)/TileSize + 1; int paddedtempnInnerDimension = tempnInnerDimension*TileSize; int tempCol = (nCols-1)/TileSize + 1; int paddednCols = tempCol*TileSize; //zero paddding double *paddedA = zeroPadMatrix(hA, nRows, nInnerDimension, paddednRows, paddedtempnInnerDimension, TileSize, 1); double *paddedB = zeroPadMatrix(hB, nInnerDimension, nCols, paddedtempnInnerDimension, paddednCols, TileSize, 1); double *paddedC = zeroPadMatrix(hB, nRows, nCols, paddednRows, paddednCols, TileSize, 0); //printMatrix(paddedA, paddednRows, paddedtempnInnerDimension); //printMatrix(paddedB, paddedtempnInnerDimension, paddednCols); //start inclusive timing cudaEvent_t startIn,stopIn; cudaEventCreate(&startIn); cudaEventCreate(&stopIn); cudaEventRecord(startIn, 0); //allocate the device memory double *dA, *dB, *dC; cudaMalloc((void **)&dA, sizeof(double)*paddednRows*paddedtempnInnerDimension); cudaMalloc((void **)&dB, sizeof(double)*paddedtempnInnerDimension*paddednCols); cudaMalloc((void **)&dC, sizeof(double)*paddednRows*paddednCols); //copy from host to device cudaMemcpy(dA, paddedA, sizeof(double)*paddednRows*paddedtempnInnerDimension, cudaMemcpyHostToDevice); cudaMemcpy(dB, paddedB, sizeof(double)*paddedtempnInnerDimension*paddednCols, cudaMemcpyHostToDevice); dim3 dimGrid(paddednCols/TileSize, paddednRows/TileSize); dim3 dimBlock(TileSize,TileSize); MatrixMulKernel<<<dimGrid, dimBlock>>>(dA, dB, dC, paddedtempnInnerDimension, paddednCols, TileSize); cudaMemcpy(paddedC, dC, sizeof(double)*paddednRows*paddednCols,cudaMemcpyDeviceToHost); extractPaddedMaxtrix(hC, paddedC, nRows, nCols, paddednRows, paddednCols, TileSize); //stop inclusive timing cudaEventRecord(stopIn, 0); cudaEventSynchronize(stopIn); cudaEventElapsedTime(incTime, startIn, stopIn); cudaEventDestroy(startIn); cudaEventDestroy(stopIn); return;//Placeholder }
c502758793ea69fb0de6091fb2dd8e1903bde421.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "su/pixel.hpp" #include "gisp_util.cuh" #include "GISP.hpp" #include <fstream> using su::PixI; namespace sp{ __device__ __forceinline__ void tDNSymE2x2(float a00, float a01, float a11, float &e0, float &e1, float &v00, float &v01, float &v10, float &v11){ float const zero = (float)0, one = (float)1, half = (float)0.5; float c2 = half * (a00 - a11), s2 = a01; float maxAbsComp = max(abs(c2), abs(s2)); if (maxAbsComp > zero){ c2 /= maxAbsComp; // in [-1,1] s2 /= maxAbsComp; // in [-1,1] float length = sqrt(c2 * c2 + s2 * s2); c2 /= length; s2 /= length; if (c2 > zero){ c2 = -c2; s2 = -s2; } } else{ c2 = -one; s2 = zero; } float s = sqrt(half * (one - c2)); // >= 1/sqrt(2) float c = half * s2 / s; float csqr = c * c, ssqr = s * s, mid = s2 * a01; e0 = csqr * a00 + mid + ssqr * a11; e1 = csqr * a11 - mid + ssqr * a00; v00 = c; v01 = s; v10 = -s; v11 = c; } // one theta one block // _NW : number of warps per block template<int _BX, int _BY, int _NW = ((_BX*_BY) >> 5)> __global__ void kernel_update_theta( float *oR, int oR_steps, int oR_lysize, PixI *iC, int iC_steps, float *MG, int MG_steps, int W, int H, int v_x, int v_y, int n_x, int n_y, int rl, int ru, float e_s, float e_c ){ #define t_x 1 #define t_y 1 int i = 0; #define __SHFREDUC(_var) for (i = 16; i > 0; i = (i >> 1)) _var += __shfl_xor(_var, i, 32); #define __SUMARIZE(_var) smemMG[wy][wx] = _var; __syncthreads(); _var = 0; for (i = 0; i < _NW; i++){_var+=smemMG[i][wx];} __syncthreads(); #define __SUM0WARP(_var) smemMG[wy][wx] = _var; __syncthreads();if(wy==0){_var = 0; for (i = 0; i < _NW; i++) _var+=smemMG[i][wx];} __syncthreads(); int ix = threadIdx.x; int iy = threadIdx.y; int wx = (iy*_BX + ix) & 31; // thread id in a warp, can use asm(); int wy = (iy*_BX + ix) >> 5; // warp id in a block __shared__ float smemMG[_NW][32]; int k_x = blockIdx.x; int k_y = blockIdx.y; int ik = k_x + k_y * n_x; int XXB = 0, XXE = 0, YYB = 0, YYE = 0; float mx = 0, my = 0, ml = 0, ma = 0, mb = 0, md = 0; int x = 0; int y = 0; dranging(XXB, XXE, YYB, YYE, W, H, n_x, n_y, k_x, k_y, v_x, v_y, rl, ru); for (y = YYB + iy; y < YYE; y += _BY){ for (x = XXB + ix; x < XXE; x += _BX){ const float RV = oR[(y - YYB)*oR_steps + (x - XXB) + ik*oR_lysize];// ik*oR_lysize can be moved out of this loop const su::PixI pix = iC[y*iC_steps + x]; mx += RV*x; my += RV*y; ml += RV*pix.f0(); ma += RV*pix.f1(); mb += RV*pix.f2(); md += RV; } } __SHFREDUC(md);__SUMARIZE(md); if (md < 1.f) return; // for each warp __SHFREDUC(mx); __SHFREDUC(my); __SHFREDUC(ml); __SHFREDUC(ma); __SHFREDUC(mb); __SUMARIZE(mx); __SUMARIZE(my); __SUMARIZE(ml); __SUMARIZE(ma); __SUMARIZE(mb); md = 1.f / md; mx = mx * md; my = my * md; ml = ml * md; ma = ma * md; mb = mb * md; ////////////////////////////////////////////////////////////////////////// // sigma float xy00 = 0, xy01 = 0, xy11 = 0, ab00 = 0, ab01 = 0, ab11 = 0; float sl = 0, tp0 = 0, tp1 = 0; for (y = YYB + iy; y < YYE; y += _BY){ for (x = XXB + ix; x < XXE; x += _BX){ const float RV = oR[(y - YYB)*oR_steps + (x - XXB) + ik*oR_lysize];// has been aligned const su::PixI pix = iC[y*iC_steps + x]; tp0 = x - mx; tp1 = y - my; xy00 += RV * tp0 * tp0; xy01 += RV * tp0 * tp1; xy11 += RV * tp1 * tp1; tp0 = pix.f0() - ml; sl += RV * tp0 * tp0; tp0 = pix.f1() - ma; tp1 = pix.f2() - mb; ab00 += RV * tp0 * tp0; ab01 += RV * tp0 * tp1; ab11 += RV * tp1 * tp1; } } __SHFREDUC(xy00); __SHFREDUC(xy01); __SHFREDUC(xy11); __SHFREDUC(ab00); __SHFREDUC(ab01); 
__SHFREDUC(ab11); __SHFREDUC(sl); __SUM0WARP(xy00); __SUM0WARP(xy01); __SUM0WARP(xy11); __SUM0WARP(ab00); __SUM0WARP(ab01); __SUM0WARP(ab11); __SUM0WARP(sl); float vxy00 = 0, vxy01 = 0, vxy10 = 0, vxy11 = 0, isx = 0, isy = 0; float vab00 = 0, vab01 = 0, vab10 = 0, vab11 = 0, isa = 0, isb = 0; float isl = 0, isd = 0; if (ix == 0 && iy == 0){ xy00 = xy00 * md; xy01 = xy01 * md; xy11 = xy11 * md; tDNSymE2x2(xy00, xy01, xy11, isx, isy, vxy00, vxy01, vxy10, vxy11); if (isx < e_s) { isx = e_s; } isx = 1. / isx; if (isy < e_s) { isy = e_s; } isy = 1. / isy; ab00 = ab00 * md; ab01 = ab01 * md; ab11 = ab11 * md; tDNSymE2x2(ab00, ab01, ab11, isa, isb, vab00, vab01, vab10, vab11); if (isa < e_c) { isa = e_c; } isa = 1. / isa; if (isb < e_c) { isb = e_c; } isb = 1. / isb; sl = sl * md; if (sl < e_c){ sl = e_c; }isl = 1. / sl; isd = sqrt(isl * isx * isy * isa * isb); // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22 // mx, my, ml, ma, mb, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd, xxb, yyb, xxe, yye smemMG[0][ 0] = mx; smemMG[0][ 1] = my; smemMG[0][ 2] = ml; smemMG[0][ 3] = ma; smemMG[0][ 4] = mb; smemMG[0][ 5] = isx; smemMG[0][ 6] = isy; smemMG[0][ 7] = isl; smemMG[0][ 8] = isa; smemMG[0][ 9] = isb; smemMG[0][10] = vxy00; smemMG[0][11] = vxy01; smemMG[0][12] = vxy10; smemMG[0][13] = vxy11; smemMG[0][14] = vab00; smemMG[0][15] = vab01; smemMG[0][16] = vab10; smemMG[0][17] = vab11; smemMG[0][18] = isd; } __syncthreads(); if (wy == 0){ if (wx < 19) MG[ik*MG_steps + wx] = smemMG[0][wx]; } #undef __SHFREDUC #undef t_x #undef t_y #undef __SUMARIZE #undef __SUM0WARP } void gpu_update_theta( float *oR, int oR_steps, int oR_lysize, su::PixI *iC, int iC_steps, float *MG, int MG_steps, int W, int H, int v_x, int v_y, int n_x, int n_y, int rl, int ru, float e_s, float e_c ){ #define _BX 16 #define _BY 8 dim3 blocks(_BX, _BY); dim3 grid(n_x, n_y); kernel_update_theta<_BX, _BY> <<<grid, blocks >> >(oR, oR_steps, oR_lysize, iC, iC_steps, MG, MG_steps, W, H, v_x, v_y, n_x, n_y, rl, ru, e_s, e_c); #undef _BY #undef _BX } }
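// The __SHFREDUC macro in the kernel above is a butterfly warp reduction: after the five
// xor-shuffle steps every lane in the warp holds the warp-wide sum. The minimal standalone
// sketch below shows the same pattern as its own kernel, using __shfl_xor_sync (the CUDA 9+
// form; the file above uses the older __shfl_xor intrinsic).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void warpSumKernel(const float *in, float *out)
{
    float v = in[threadIdx.x];                       // one value per lane, 32 lanes
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_xor_sync(0xffffffffu, v, offset, 32);
    if (threadIdx.x == 0) out[0] = v;                // every lane now has the sum; lane 0 writes it
}

int main()
{
    float h_in[32], h_out = 0.0f;
    for (int i = 0; i < 32; i++) h_in[i] = 1.0f;     // expected sum: 32
    float *d_in, *d_out;
    cudaMalloc(&d_in, 32 * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, 32 * sizeof(float), cudaMemcpyHostToDevice);
    warpSumKernel<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("warp sum = %f\n", h_out);                // prints 32.000000
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}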
c502758793ea69fb0de6091fb2dd8e1903bde421.cu
#include "su/pixel.hpp" #include "gisp_util.cuh" #include "GISP.hpp" #include <fstream> using su::PixI; namespace sp{ __device__ __forceinline__ void tDNSymE2x2(float a00, float a01, float a11, float &e0, float &e1, float &v00, float &v01, float &v10, float &v11){ float const zero = (float)0, one = (float)1, half = (float)0.5; float c2 = half * (a00 - a11), s2 = a01; float maxAbsComp = max(abs(c2), abs(s2)); if (maxAbsComp > zero){ c2 /= maxAbsComp; // in [-1,1] s2 /= maxAbsComp; // in [-1,1] float length = sqrt(c2 * c2 + s2 * s2); c2 /= length; s2 /= length; if (c2 > zero){ c2 = -c2; s2 = -s2; } } else{ c2 = -one; s2 = zero; } float s = sqrt(half * (one - c2)); // >= 1/sqrt(2) float c = half * s2 / s; float csqr = c * c, ssqr = s * s, mid = s2 * a01; e0 = csqr * a00 + mid + ssqr * a11; e1 = csqr * a11 - mid + ssqr * a00; v00 = c; v01 = s; v10 = -s; v11 = c; } // one theta one block // _NW : number of warps per block template<int _BX, int _BY, int _NW = ((_BX*_BY) >> 5)> __global__ void kernel_update_theta( float *oR, int oR_steps, int oR_lysize, PixI *iC, int iC_steps, float *MG, int MG_steps, int W, int H, int v_x, int v_y, int n_x, int n_y, int rl, int ru, float e_s, float e_c ){ #define t_x 1 #define t_y 1 int i = 0; #define __SHFREDUC(_var) for (i = 16; i > 0; i = (i >> 1)) _var += __shfl_xor(_var, i, 32); #define __SUMARIZE(_var) smemMG[wy][wx] = _var; __syncthreads(); _var = 0; for (i = 0; i < _NW; i++){_var+=smemMG[i][wx];} __syncthreads(); #define __SUM0WARP(_var) smemMG[wy][wx] = _var; __syncthreads();if(wy==0){_var = 0; for (i = 0; i < _NW; i++) _var+=smemMG[i][wx];} __syncthreads(); int ix = threadIdx.x; int iy = threadIdx.y; int wx = (iy*_BX + ix) & 31; // thread id in a warp, can use asm(); int wy = (iy*_BX + ix) >> 5; // warp id in a block __shared__ float smemMG[_NW][32]; int k_x = blockIdx.x; int k_y = blockIdx.y; int ik = k_x + k_y * n_x; int XXB = 0, XXE = 0, YYB = 0, YYE = 0; float mx = 0, my = 0, ml = 0, ma = 0, mb = 0, md = 0; int x = 0; int y = 0; dranging(XXB, XXE, YYB, YYE, W, H, n_x, n_y, k_x, k_y, v_x, v_y, rl, ru); for (y = YYB + iy; y < YYE; y += _BY){ for (x = XXB + ix; x < XXE; x += _BX){ const float RV = oR[(y - YYB)*oR_steps + (x - XXB) + ik*oR_lysize];// ik*oR_lysize can be moved out of this loop const su::PixI pix = iC[y*iC_steps + x]; mx += RV*x; my += RV*y; ml += RV*pix.f0(); ma += RV*pix.f1(); mb += RV*pix.f2(); md += RV; } } __SHFREDUC(md);__SUMARIZE(md); if (md < 1.f) return; // for each warp __SHFREDUC(mx); __SHFREDUC(my); __SHFREDUC(ml); __SHFREDUC(ma); __SHFREDUC(mb); __SUMARIZE(mx); __SUMARIZE(my); __SUMARIZE(ml); __SUMARIZE(ma); __SUMARIZE(mb); md = 1.f / md; mx = mx * md; my = my * md; ml = ml * md; ma = ma * md; mb = mb * md; ////////////////////////////////////////////////////////////////////////// // sigma float xy00 = 0, xy01 = 0, xy11 = 0, ab00 = 0, ab01 = 0, ab11 = 0; float sl = 0, tp0 = 0, tp1 = 0; for (y = YYB + iy; y < YYE; y += _BY){ for (x = XXB + ix; x < XXE; x += _BX){ const float RV = oR[(y - YYB)*oR_steps + (x - XXB) + ik*oR_lysize];// has been aligned const su::PixI pix = iC[y*iC_steps + x]; tp0 = x - mx; tp1 = y - my; xy00 += RV * tp0 * tp0; xy01 += RV * tp0 * tp1; xy11 += RV * tp1 * tp1; tp0 = pix.f0() - ml; sl += RV * tp0 * tp0; tp0 = pix.f1() - ma; tp1 = pix.f2() - mb; ab00 += RV * tp0 * tp0; ab01 += RV * tp0 * tp1; ab11 += RV * tp1 * tp1; } } __SHFREDUC(xy00); __SHFREDUC(xy01); __SHFREDUC(xy11); __SHFREDUC(ab00); __SHFREDUC(ab01); __SHFREDUC(ab11); __SHFREDUC(sl); __SUM0WARP(xy00); __SUM0WARP(xy01); __SUM0WARP(xy11); 
__SUM0WARP(ab00); __SUM0WARP(ab01); __SUM0WARP(ab11); __SUM0WARP(sl); float vxy00 = 0, vxy01 = 0, vxy10 = 0, vxy11 = 0, isx = 0, isy = 0; float vab00 = 0, vab01 = 0, vab10 = 0, vab11 = 0, isa = 0, isb = 0; float isl = 0, isd = 0; if (ix == 0 && iy == 0){ xy00 = xy00 * md; xy01 = xy01 * md; xy11 = xy11 * md; tDNSymE2x2(xy00, xy01, xy11, isx, isy, vxy00, vxy01, vxy10, vxy11); if (isx < e_s) { isx = e_s; } isx = 1. / isx; if (isy < e_s) { isy = e_s; } isy = 1. / isy; ab00 = ab00 * md; ab01 = ab01 * md; ab11 = ab11 * md; tDNSymE2x2(ab00, ab01, ab11, isa, isb, vab00, vab01, vab10, vab11); if (isa < e_c) { isa = e_c; } isa = 1. / isa; if (isb < e_c) { isb = e_c; } isb = 1. / isb; sl = sl * md; if (sl < e_c){ sl = e_c; }isl = 1. / sl; isd = sqrt(isl * isx * isy * isa * isb); // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22 // mx, my, ml, ma, mb, isx, isy, isl, isa, isb, vxy00, vxy01, vxy10, vxy11, vab00, vab01, vab10, vab11, isd, xxb, yyb, xxe, yye smemMG[0][ 0] = mx; smemMG[0][ 1] = my; smemMG[0][ 2] = ml; smemMG[0][ 3] = ma; smemMG[0][ 4] = mb; smemMG[0][ 5] = isx; smemMG[0][ 6] = isy; smemMG[0][ 7] = isl; smemMG[0][ 8] = isa; smemMG[0][ 9] = isb; smemMG[0][10] = vxy00; smemMG[0][11] = vxy01; smemMG[0][12] = vxy10; smemMG[0][13] = vxy11; smemMG[0][14] = vab00; smemMG[0][15] = vab01; smemMG[0][16] = vab10; smemMG[0][17] = vab11; smemMG[0][18] = isd; } __syncthreads(); if (wy == 0){ if (wx < 19) MG[ik*MG_steps + wx] = smemMG[0][wx]; } #undef __SHFREDUC #undef t_x #undef t_y #undef __SUMARIZE #undef __SUM0WARP } void gpu_update_theta( float *oR, int oR_steps, int oR_lysize, su::PixI *iC, int iC_steps, float *MG, int MG_steps, int W, int H, int v_x, int v_y, int n_x, int n_y, int rl, int ru, float e_s, float e_c ){ #define _BX 16 #define _BY 8 dim3 blocks(_BX, _BY); dim3 grid(n_x, n_y); kernel_update_theta<_BX, _BY> <<<grid, blocks >> >(oR, oR_steps, oR_lysize, iC, iC_steps, MG, MG_steps, W, H, v_x, v_y, n_x, n_y, rl, ru, e_s, e_c); #undef _BY #undef _BX } }
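Both variants of kernel_update_theta accumulate the per-pixel moments with the __SHFREDUC / __SUMARIZE macros: a butterfly __shfl_xor reduction inside each warp, followed by one shared-memory pass over the per-warp totals. The standalone kernel below is a minimal sketch of that same pattern, not GISP code: the names (blockSum, warpSums) are illustrative, and it uses __shfl_xor_sync, the mask-taking form of the shuffle, rather than the older __shfl_xor seen in the files above.

// Standalone sketch of the warp-shuffle + shared-memory reduction pattern.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

template <int BLOCK, int NWARPS = BLOCK / 32>
__global__ void blockSum(const float *in, float *out, int n)
{
    __shared__ float warpSums[NWARPS];

    const int tid  = threadIdx.x;
    const int lane = tid & 31;   // thread id within the warp (cf. wx)
    const int warp = tid >> 5;   // warp id within the block  (cf. wy)

    // Grid-stride accumulation into a per-thread partial sum.
    float v = 0.f;
    for (int i = blockIdx.x * BLOCK + tid; i < n; i += gridDim.x * BLOCK)
        v += in[i];

    // Butterfly reduction: afterwards every lane holds the warp total.
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_xor_sync(0xffffffffu, v, offset, 32);

    if (lane == 0)
        warpSums[warp] = v;
    __syncthreads();

    // One warp combines the per-warp totals (the equivalent of __SUM0WARP).
    if (warp == 0) {
        float total = 0.f;
        for (int w = 0; w < NWARPS; ++w)
            total += warpSums[w];
        if (lane == 0)
            atomicAdd(out, total);
    }
}

int main()
{
    const int n = 1 << 20;
    float *dIn = nullptr, *dOut = nullptr;
    cudaMalloc(&dIn, n * sizeof(float));
    cudaMalloc(&dOut, sizeof(float));
    cudaMemset(dOut, 0, sizeof(float));

    // Fill input with ones so the expected sum is simply n.
    std::vector<float> host(n, 1.f);
    cudaMemcpy(dIn, host.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    blockSum<128><<<64, 128>>>(dIn, dOut, n);

    float result = 0.f;
    cudaMemcpy(&result, dOut, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", result, n);

    cudaFree(dIn);
    cudaFree(dOut);
    return 0;
}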
a1ff1a0c919db470bd9ecf972247d0fa65272e28.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <hip/hip_runtime.h> #include "image_function_cuda.cuh" #include "../parameter_validation.h" #include "../image_function_helper.h" #include "cuda_types.cuh" #include "cuda_helper.cuh" namespace { struct FunctionRegistrator { Image_Function_Helper::FunctionTableHolder table; FunctionRegistrator() { table.AbsoluteDifference = &Image_Function_Cuda::AbsoluteDifference; table.BitwiseAnd = &Image_Function_Cuda::BitwiseAnd; table.BitwiseOr = &Image_Function_Cuda::BitwiseOr; table.BitwiseXor = &Image_Function_Cuda::BitwiseXor; table.ConvertToGrayScale = &Image_Function_Cuda::ConvertToGrayScale; table.ConvertToRgb = &Image_Function_Cuda::ConvertToRgb; table.Copy = &Image_Function_Cuda::Copy; table.ExtractChannel = &Image_Function_Cuda::ExtractChannel; table.Fill = &Image_Function_Cuda::Fill; table.GammaCorrection = &Image_Function_Cuda::GammaCorrection; table.Histogram = &Image_Function_Cuda::Histogram; table.Invert = &Image_Function_Cuda::Invert; table.LookupTable = &Image_Function_Cuda::LookupTable; table.SetPixel = &Image_Function_Cuda::SetPixel; table.Maximum = &Image_Function_Cuda::Maximum; table.Minimum = &Image_Function_Cuda::Minimum; table.ProjectionProfile = &Image_Function_Cuda::ProjectionProfile; table.Subtract = &Image_Function_Cuda::Subtract; table.Threshold = &Image_Function_Cuda::Threshold; table.Threshold2 = &Image_Function_Cuda::Threshold; ImageTypeManager::instance().setFunctionTable( penguinV::ImageCuda().type(), table ); ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertToCuda, penguinV::Image(), penguinV::ImageCuda() ); ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertFromCuda, penguinV::ImageCuda(), penguinV::Image() ); } }; const FunctionRegistrator functionRegistrator; // The list of CUDA device functions on device side __global__ void absoluteDifferenceCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) > ( *in2X )) ? 
((*in1X) - (*in2X)) : ((*in2X) - (*in1X)); } } __global__ void bitwiseAndCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn1 = y * rowSizeIn1 + x; const uint32_t idIn2 = y * rowSizeIn2 + x; const uint32_t idOut = y * rowSizeOut + x; out[idOut] = in1[idIn1] & in2[idIn2]; } } __global__ void bitwiseOrCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn1 = y * rowSizeIn1 + x; const uint32_t idIn2 = y * rowSizeIn2 + x; const uint32_t idOut = y * rowSizeOut + x; out[idOut] = in1[idIn1] | in2[idIn2]; } } __global__ void bitwiseXorCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn1 = y * rowSizeIn1 + x; const uint32_t idIn2 = y * rowSizeIn2 + x; const uint32_t idOut = y * rowSizeOut + x; out[idOut] = in1[idIn1] ^ in2[idIn2]; } } __global__ void convertToGrayScaleCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * data = in + y * rowSizeIn + x * colorCount; const uint8_t * dataEnd = data + colorCount; uint32_t sum = 0; for ( ; data != dataEnd; ++data ) { sum += (*data); } const uint32_t id = y * rowSizeOut + x; out[id] = static_cast<uint8_t>(sum / colorCount); } } __global__ void convertToRgbCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint8_t colorCount, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * dataIn = in + y * rowSizeIn + x; uint8_t * dataOut = out + y * rowSizeOut + x * colorCount; const uint8_t * dataOutEnd = dataOut + colorCount; for ( ; dataOut != dataOutEnd; ++dataOut ) { (*dataOut) = (*dataIn); } } } __global__ void copyCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = in[y * rowSizeIn + x]; } } __global__ void extractChannelCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) out[y * rowSizeOut + x] = in[y * rowSizeIn + x * colorCount]; } __global__ void fillCuda( uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint8_t value ) { const 
uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) data[y * rowSize + x] = value; } __global__ void flipCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, bool horizontal, bool vertical ) { const uint32_t inX = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t inY = blockDim.y * blockIdx.y + threadIdx.y; if ( inX < width && inY < height ) { const uint32_t outX = horizontal ? (width - 1 - inX) : inX; const uint32_t outY = vertical ? (height - 1 - inY) : inY; out[outY * rowSizeOut + outX] = in[inY * rowSizeIn + inX]; } } __global__ void histogramCuda( const uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * histogram ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t id = y * rowSize + x; atomicAdd( &histogram[data[id]], 1 ); } } __global__ void invertCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = ~in[y * rowSizeIn + x]; } } __global__ void isEqualCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint32_t width, uint32_t height, uint32_t * isEqual ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t partsEqual = static_cast<uint32_t>( in1[y * rowSizeIn1 + x] == in2[y * rowSizeIn2 + x] ); atomicAnd( isEqual, partsEqual ); } } __global__ void lookupTableCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, uint8_t * table ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = table[in[y * rowSizeIn + x]]; } } __global__ void maximumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) > ( *in2X )) ? (*in1X) : (*in2X); } } __global__ void minimumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) < (*in2X)) ? 
(*in1X) : (*in2X); } } __global__ void projectionProfileHorizontalCuda( const uint8_t * image, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * projection ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * imageX = image + y * rowSize + x; atomicAdd( &projection[x], (*imageX) ); } } __global__ void projectionProfileVerticalCuda( const uint8_t * image, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * projection ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * imageY = image + y * rowSize + x; atomicAdd( &projection[y], (*imageY) ); } } __global__ void rotateCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, float inXStart, float inYStart, uint32_t width, uint32_t height, float cosAngle, float sinAngle ) { uint32_t outX = blockDim.x * blockIdx.x + threadIdx.x; uint32_t outY = blockDim.y * blockIdx.y + threadIdx.y; // Only do something if this thread is for a valid pixel in the output if ( outX < width && outY < height ) { // Both input coordinates are shifted using the cosAngle, sinAngle, outX, and outY. The shift // comes from inverse rotating the horizontal and vertical iterations over the output. // Note that inverse rotation by X axis is [cos(angle), -sin(angle)], // and the inverse rotation by Y axis is [sin(angle), cos(angle)]. const float exactInX = inXStart + cosAngle * outX + sinAngle * outY; const float exactInY = inYStart - sinAngle * outX + cosAngle * outY; const int32_t inX = static_cast<int32_t>(exactInX); const int32_t inY = static_cast<int32_t>(exactInY); // Shift to the output pixel out = out + outY * rowSizeOut + outX; // Note that we will be taking an average with next pixels, so next pixels need to be in the image too if ( inX < 0 || inX >= width - 1 || inY < 0 || inY >= height - 1 ) { *out = 0; // We do not actually know what is beyond the image, so set value to 0 } else { // Shift to the input pixel in = in + inY * rowSizeIn + inX; // Now we use a bilinear approximation to find the pixel intensity value. That is, we take an // average of pixels (inX, inY), (inX + 1, inY), (inX, inY + 1), and (inX + 1, inY + 1). // We add an offset of 0.5 so that conversion to integer is done using rounding. 
const float probX = exactInX - inX; const float probY = exactInY - inY; const float mean = *in * (1 - probX) * (1 - probY) + *(in + 1) * probX * (1 - probY) + *(in + rowSizeIn) * (1 - probX) * probY + *(in + rowSizeIn + 1) * probX * probY + 0.5f; *out = static_cast<uint8_t>(mean); } } } __global__ void setPixelCuda( uint8_t * in, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t x, uint32_t y, uint8_t value ) { if ( x < width && y < height ) { in[y * rowSize + x] = value; } } __global__ void setPixelCuda( uint8_t * in, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * pointX, uint32_t * pointY, uint32_t pointSize, uint32_t value ) { const uint32_t idPoint = blockIdx.x * blockDim.x + threadIdx.x; if ( idPoint < pointSize) { const uint32_t x = pointX[idPoint]; const uint32_t y = pointY[idPoint]; if ( x < width && y < height ) { in[y * rowSize + x] = value; } } } __global__ void subtractCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : 0; } } __global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, uint8_t threshold ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = (in[y * rowSizeIn + x] < threshold) ? 0 : 255; } } __global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn = y * rowSizeIn + x; out[y * rowSizeOut + x] = ((in[idIn] < minThreshold) || (in[idIn] > maxThreshold)) ? 
0 : 255; } } } namespace Image_Function_Cuda { Image AbsoluteDifference( const Image & in1, const Image & in2 ) { return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2 ); } void AbsoluteDifference( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2, out ); } Image AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( absoluteDifferenceCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image BitwiseAnd( const Image & in1, const Image & in2 ) { return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2 ); } void BitwiseAnd( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2, out ); } Image BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( bitwiseAndCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image BitwiseOr( const Image & in1, const Image & in2 ) { return Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2 ); } void BitwiseOr( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2, out ); } Image BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return 
Image_Function_Helper::BitwiseOr( BitwiseOr, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( bitwiseOrCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image BitwiseXor( const Image & in1, const Image & in2 ) { return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2 ); } void BitwiseXor( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2, out ); } Image BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( bitwiseXorCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image ConvertToCuda( const Image & in ) { Image out = ImageCuda().generate( in.width(), in.height(), in.colorCount() ); ConvertToCuda( in, out ); return out; } void ConvertToCuda( const Image & in, Image & out ) { Image_Function::ValidateImageParameters( in ); Image_Function::ValidateImageParameters( out ); if ( in.width() != out.width() || in.height() != out.height() || in.colorCount() != out.colorCount() ) throw penguinVException( "Bad input parameters in image function" ); if ( in.alignment() == 1u || (in.rowSize() == in.width() * in.colorCount()) ) { const uint32_t size = in.rowSize() * in.height(); if ( !multiCuda::cudaSafeCheck( hipMemcpy( out.data(), in.data(), size * sizeof( uint8_t ), hipMemcpyHostToDevice ) ) ) throw penguinVException( "Cannot copy a memory to CUDA device" ); } else { if ( !multiCuda::cudaSafeCheck( hipMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(), in.colorCount() * in.width(), in.height(), hipMemcpyHostToDevice ) ) ) throw penguinVException( 
"Cannot copy a memory to CUDA device" ); } } Image ConvertFromCuda( const Image & in ) { Image out( in.width(), in.height(), in.colorCount(), 1u ); ConvertFromCuda( in, out ); return out; } void ConvertFromCuda(const Image & in, Image & out ) { Image_Function::ValidateImageParameters( in ); Image_Function::ValidateImageParameters( out ); if ( in.width() != out.width() || in.height() != out.height() || in.colorCount() != out.colorCount() ) throw penguinVException( "Bad input parameters in image function" ); if ( out.alignment() == 1u || (out.rowSize() == out.width() * out.colorCount()) ) { const uint32_t size = in.rowSize() * in.height(); if ( !multiCuda::cudaSafeCheck( hipMemcpy( out.data(), in.data(), size, hipMemcpyDeviceToHost ) ) ) throw penguinVException( "Cannot copy a memory from CUDA device" ); } else { if ( !multiCuda::cudaSafeCheck( hipMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(), in.colorCount() * in.width(), in.height(), hipMemcpyDeviceToHost ) ) ) throw penguinVException( "Cannot copy a memory to CUDA device" ); } } Image ConvertToGrayScale( const Image & in ) { return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in ); } void ConvertToGrayScale( const Image & in, Image & out ) { Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, out ); } Image ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, startXIn, startYIn, width, height ); } void ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( out ); if ( in.colorCount() == penguinV::GRAY_SCALE ) { Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); return; } const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t colorCount = in.colorCount(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( convertToGrayScaleCuda, width, height, inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height ); } Image ConvertToRgb( const Image & in ) { return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in ); } void ConvertToRgb( const Image & in, Image & out ) { Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, out ); } Image ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, startXIn, startYIn, width, height ); } void ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyRGBImage( out ); if ( in.colorCount() == penguinV::RGB ) { Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); return; } const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t colorCount = out.colorCount(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; 
launchKernel2D( convertToRgbCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, colorCount, width, height ); } void Copy( const Image & in, Image & out ) { Image_Function::ValidateImageParameters( in, out ); out = in; } Image Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::Copy( Copy, in, startXIn, startYIn, width, height ); } void Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in, out ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); width = width * colorCount; const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( copyCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height ); } Image ExtractChannel( const Image & in, uint8_t channelId ) { return Image_Function_Helper::ExtractChannel( ExtractChannel, in, channelId ); } void ExtractChannel( const Image & in, Image & out, uint8_t channelId ) { Image_Function_Helper::ExtractChannel( ExtractChannel, in, out, channelId ); } Image ExtractChannel( const Image & in, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t channelId ) { return Image_Function_Helper::ExtractChannel( ExtractChannel, in, x, y, width, height, channelId ); } void ExtractChannel( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, uint8_t channelId ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( out ); if ( channelId >= in.colorCount() ) throw penguinVException( "Channel ID for color image is greater than channel count in input image" ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t colorCount = in.colorCount(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount + channelId; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( extractChannelCuda, width, height, inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height ); } void Fill( Image & image, uint8_t value ) { image.fill( value ); } void Fill( Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t value ) { Image_Function::ValidateImageParameters( image, x, y, width, height ); Image_Function::VerifyGrayScaleImage( image ); const uint32_t rowSize = image.rowSize(); uint8_t * imageY = image.data() + y * rowSize + x; launchKernel2D( fillCuda, width, height, imageY, rowSize, width, height, value ); } Image Flip( const Image & in, bool horizontal, bool vertical ) { return Image_Function_Helper::Flip( Flip, in, horizontal, vertical ); } void Flip( const Image & in, Image & out, bool horizontal, bool vertical ) { Image_Function_Helper::Flip( Flip, in, out, horizontal, vertical ); } Image Flip( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, bool horizontal, bool vertical ) { return Image_Function_Helper::Flip( Flip, in, startXIn, startYIn, width, height, horizontal, vertical ); } void Flip( const Image & in, uint32_t startXIn, 
uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, bool horizontal, bool vertical ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); if ( !horizontal && !vertical ) { Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); } else { const uint8_t colorCount = in.colorCount(); width = width * colorCount; const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( flipCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, horizontal, vertical ); } } Image GammaCorrection( const Image & in, double a, double gamma ) { return Image_Function_Helper::GammaCorrection( GammaCorrection, in, a, gamma ); } void GammaCorrection( const Image & in, Image & out, double a, double gamma ) { Image_Function_Helper::GammaCorrection( GammaCorrection, in, out, a, gamma ); } Image GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, double a, double gamma ) { return Image_Function_Helper::GammaCorrection( GammaCorrection, in, startXIn, startYIn, width, height, a, gamma ); } void GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, double a, double gamma ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); const std::vector<uint8_t> & value = Image_Function_Helper::GetGammaCorrectionLookupTable( a, gamma ); LookupTable( in, startXIn, startYIn, out, startXOut, startYOut, width, height, value ); } uint8_t GetThreshold( const std::vector < uint32_t > & histogram ) { return Image_Function_Helper::GetThreshold( histogram ); } std::vector < uint32_t > Histogram( const Image & image ) { return Image_Function_Helper::Histogram( Histogram, image ); } void Histogram( const Image & image, std::vector < uint32_t > & histogram ) { Image_Function_Helper::Histogram( Histogram, image, histogram ); } std::vector < uint32_t > Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height ) { return Image_Function_Helper::Histogram( Histogram, image, x, y, width, height ); } void Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, std::vector < uint32_t > & histogram ) { Image_Function::ValidateImageParameters( image, x, y, width, height ); Image_Function::VerifyGrayScaleImage( image ); histogram.resize( 256u ); std::fill( histogram.begin(), histogram.end(), 0u ); const uint32_t rowSize = image.rowSize(); const uint8_t * imageY = image.data() + y * rowSize + x; multiCuda::Array< uint32_t > tableCuda( histogram ); launchKernel2D( histogramCuda, width, height, imageY, rowSize, width, height, tableCuda.data() ); histogram = tableCuda.get(); } Image Invert( const Image & in ) { return Image_Function_Helper::Invert( Invert, in ); } void Invert( const Image & in, Image & out ) { Image_Function_Helper::Invert( Invert, in, out ); } Image Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::Invert( Invert, in, startXIn, startYIn, width, height ); } void 
Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in, out ); width = width * colorCount; const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( invertCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height ); } bool IsEqual( const Image & in1, const Image & in2 ) { Image_Function::ValidateImageParameters( in1, in2 ); return IsEqual( in1, 0, 0, in2, 0, 0, in1.width(), in1.height() ); } bool IsEqual( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2 ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; multiCuda::Type< uint32_t > result( 1 ); launchKernel2D( isEqualCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, width, height, result.data() ); return ( result.get() != 0 ); } Image LookupTable( const Image & in, const std::vector < uint8_t > & table ) { return Image_Function_Helper::LookupTable( LookupTable, in, table ); } void LookupTable( const Image & in, Image & out, const std::vector < uint8_t > & table ) { Image_Function_Helper::LookupTable( LookupTable, in, out, table ); } Image LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, const std::vector < uint8_t > & table ) { return Image_Function_Helper::LookupTable( LookupTable, in, startXIn, startYIn, width, height, table ); } void LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, const std::vector < uint8_t > & table ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); if ( table.size() != 256u ) throw penguinVException( "Lookup table size is not equal to 256" ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in, out ); width = width * colorCount; const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; multiCuda::Array< uint8_t > tableCuda( table ); launchKernel2D( lookupTableCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, tableCuda.data() ); } Image Maximum( const Image & in1, const Image & in2 ) { return Image_Function_Helper::Maximum( Maximum, in1, in2 ); } void Maximum( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::Maximum( Maximum, in1, in2, out ); } Image Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t 
width, uint32_t height ) { return Image_Function_Helper::Maximum( Maximum, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( maximumCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image Minimum( const Image & in1, const Image & in2 ) { return Image_Function_Helper::Minimum( Minimum, in1, in2 ); } void Minimum( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::Minimum( Minimum, in1, in2, out ); } Image Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::Minimum( Minimum, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( minimumCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } std::vector < uint32_t > ProjectionProfile( const Image & image, bool horizontal ) { return Image_Function_Helper::ProjectionProfile( ProjectionProfile, image, horizontal ); } void ProjectionProfile( const Image & image, bool horizontal, std::vector < uint32_t > & projection ) { ProjectionProfile( image, 0, 0, image.width(), image.height(), horizontal, projection ); } std::vector < uint32_t > ProjectionProfile( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, bool horizontal ) { return Image_Function_Helper::ProjectionProfile( ProjectionProfile, image, x, y, width, height, horizontal ); } void ProjectionProfile( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, bool horizontal, std::vector<uint32_t> & projection ) { Image_Function::ValidateImageParameters( image, x, y, width, height ); const uint8_t colorCount = image.colorCount(); width *= colorCount; projection.resize( horizontal ? 
width : height ); std::fill( projection.begin(), projection.end(), 0u ); const uint32_t rowSize = image.rowSize(); const uint8_t * imageX = image.data() + y * rowSize + x * colorCount; multiCuda::Array< uint32_t > projectionCuda( projection ); launchKernel2D( ( horizontal ? projectionProfileHorizontalCuda : projectionProfileVerticalCuda ), width, height, imageX, rowSize, width, height, projectionCuda.data()); projection = projectionCuda.get(); } void Rotate( const Image & in, float centerXIn, float centerYIn, Image & out, float centerXOut, float centerYOut, float angle ) { Image_Function::ValidateImageParameters( in, out ); Image_Function::VerifyGrayScaleImage( in, out ); const float cosAngle = cos( angle ); const float sinAngle = sin( angle ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint32_t width = in.width(); const uint32_t height = in.height(); uint8_t const * inMem = in.data(); uint8_t * outMem = out.data(); // We iterate over the output array in the usual manner; we iterate over the // input using inverse rotation of this shift. Doing so, we start the input // iteration at the following positions: const float inXStart = -( cosAngle * centerXOut + sinAngle * centerYOut) + centerXIn; const float inYStart = -(-sinAngle * centerXOut + cosAngle * centerYOut) + centerYIn; launchKernel2D( rotateCuda, width, height, inMem, rowSizeIn, outMem, rowSizeOut, inXStart, inYStart, width, height, cosAngle, sinAngle ); } void SetPixel( Image & image, uint32_t x, uint32_t y, uint8_t value ) { Image_Function::ValidateImageParameters( image ); if ( x >= image.width() || y >= image.height() ) throw penguinVException( "Bad input parameters in image function" ); launchKernel1D( setPixelCuda, 1, image.data(), image.rowSize(), image.width(), image.height(), x, y, value ); } void SetPixel( Image & image, const std::vector<uint32_t> & X, const std::vector<uint32_t> & Y, uint8_t value ) { Image_Function::ValidateImageParameters( image ); if ( X.size() != Y.size() ) throw penguinVException( "Bad input parameters in image function" ); if ( X.size() > 0 ) { const uint32_t width = image.width(); const uint32_t height = image.height(); for ( size_t i = 0; i < X.size(); ++i ) { if ( X[i] >= width || Y[i] >= height ) throw penguinVException( "Bad input parameters in image function" ); } multiCuda::Array<uint32_t> pointX( X ); multiCuda::Array<uint32_t> pointY( Y ); launchKernel1D( setPixelCuda, static_cast<uint32_t>( X.size() ), image.data(), image.rowSize(), width, height, pointX.data(), pointY.data(), pointX.size(), value ); } } Image Subtract( const Image & in1, const Image & in2 ) { return Image_Function_Helper::Subtract( Subtract, in1, in2 ); } void Subtract( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::Subtract( Subtract, in1, in2, out ); } Image Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::Subtract( Subtract, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); 
width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( subtractCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image Threshold( const Image & in, uint8_t threshold ) { return Image_Function_Helper::Threshold( Threshold, in, threshold ); } void Threshold( const Image & in, Image & out, uint8_t threshold ) { Image_Function_Helper::Threshold( Threshold, in, out, threshold ); } Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t threshold ) { return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, threshold ); } void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, uint8_t threshold ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( thresholdCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, threshold ); } Image Threshold( const Image & in, uint8_t minThreshold, uint8_t maxThreshold ) { return Image_Function_Helper::Threshold( Threshold, in, minThreshold, maxThreshold ); } void Threshold( const Image & in, Image & out, uint8_t minThreshold, uint8_t maxThreshold ) { Image_Function_Helper::Threshold( Threshold, in, out, minThreshold, maxThreshold ); } Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold ) { return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, minThreshold, maxThreshold ); } void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( thresholdCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, minThreshold, maxThreshold ); } }
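Rotate in the file above maps every output pixel back into the input with an inverse rotation (the inXStart/inYStart setup in the host wrapper), and rotateCuda then samples the four surrounding input pixels with bilinear weights. The host-side sketch below re-derives that mapping and weighting for illustration only: rotateBilinear and its tightly packed uint8_t buffer are assumptions of this sketch, not penguinV API; real callers go through the Image class and launchKernel2D.

// Host-side sketch (not penguinV code) of the inverse-rotation + bilinear
// sampling performed by rotateCuda; the buffer is assumed tightly packed,
// i.e. rowSize == width.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

static void rotateBilinear(const std::vector<uint8_t> &in, uint32_t width, uint32_t height,
                           std::vector<uint8_t> &out, // must be pre-sized to width*height
                           float centerXIn, float centerYIn,
                           float centerXOut, float centerYOut, float angle)
{
    const float cosA = std::cos(angle);
    const float sinA = std::sin(angle);

    // Start of the inverse-rotated input iteration, as in the Rotate wrapper.
    const float inXStart = -( cosA * centerXOut + sinA * centerYOut) + centerXIn;
    const float inYStart = -(-sinA * centerXOut + cosA * centerYOut) + centerYIn;

    for (uint32_t outY = 0; outY < height; ++outY) {
        for (uint32_t outX = 0; outX < width; ++outX) {
            const float exactInX = inXStart + cosA * outX + sinA * outY;
            const float exactInY = inYStart - sinA * outX + cosA * outY;
            const int32_t inX = static_cast<int32_t>(exactInX);
            const int32_t inY = static_cast<int32_t>(exactInY);

            uint8_t value = 0; // outside the source image -> 0, as in the kernel
            if (inX >= 0 && inY >= 0 &&
                inX + 1 < static_cast<int32_t>(width) &&
                inY + 1 < static_cast<int32_t>(height)) {
                const float px = exactInX - inX; // weight toward the right neighbour
                const float py = exactInY - inY; // weight toward the lower neighbour
                const uint8_t *p = &in[inY * width + inX];
                const float mean = p[0]         * (1 - px) * (1 - py)
                                 + p[1]         * px       * (1 - py)
                                 + p[width]     * (1 - px) * py
                                 + p[width + 1] * px       * py + 0.5f;
                value = static_cast<uint8_t>(mean);
            }
            out[outY * width + outX] = value;
        }
    }
}

int main()
{
    const uint32_t w = 8, h = 8;
    std::vector<uint8_t> src(w * h, 0), dst(w * h, 0);
    src[2 * w + 5] = 255; // single bright pixel to track

    // Rotate by 90 degrees around the image centre.
    rotateBilinear(src, w, h, dst, (w - 1) / 2.f, (h - 1) / 2.f,
                   (w - 1) / 2.f, (h - 1) / 2.f, 1.5707963f);

    for (uint32_t y = 0; y < h; ++y) {
        for (uint32_t x = 0; x < w; ++x)
            printf("%4d", dst[y * w + x]);
        printf("\n");
    }
    return 0;
}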
a1ff1a0c919db470bd9ecf972247d0fa65272e28.cu
#include <cmath> #include <cuda_runtime.h> #include "image_function_cuda.cuh" #include "../parameter_validation.h" #include "../image_function_helper.h" #include "cuda_types.cuh" #include "cuda_helper.cuh" namespace { struct FunctionRegistrator { Image_Function_Helper::FunctionTableHolder table; FunctionRegistrator() { table.AbsoluteDifference = &Image_Function_Cuda::AbsoluteDifference; table.BitwiseAnd = &Image_Function_Cuda::BitwiseAnd; table.BitwiseOr = &Image_Function_Cuda::BitwiseOr; table.BitwiseXor = &Image_Function_Cuda::BitwiseXor; table.ConvertToGrayScale = &Image_Function_Cuda::ConvertToGrayScale; table.ConvertToRgb = &Image_Function_Cuda::ConvertToRgb; table.Copy = &Image_Function_Cuda::Copy; table.ExtractChannel = &Image_Function_Cuda::ExtractChannel; table.Fill = &Image_Function_Cuda::Fill; table.GammaCorrection = &Image_Function_Cuda::GammaCorrection; table.Histogram = &Image_Function_Cuda::Histogram; table.Invert = &Image_Function_Cuda::Invert; table.LookupTable = &Image_Function_Cuda::LookupTable; table.SetPixel = &Image_Function_Cuda::SetPixel; table.Maximum = &Image_Function_Cuda::Maximum; table.Minimum = &Image_Function_Cuda::Minimum; table.ProjectionProfile = &Image_Function_Cuda::ProjectionProfile; table.Subtract = &Image_Function_Cuda::Subtract; table.Threshold = &Image_Function_Cuda::Threshold; table.Threshold2 = &Image_Function_Cuda::Threshold; ImageTypeManager::instance().setFunctionTable( penguinV::ImageCuda().type(), table ); ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertToCuda, penguinV::Image(), penguinV::ImageCuda() ); ImageTypeManager::instance().setConvertFunction( Image_Function_Cuda::ConvertFromCuda, penguinV::ImageCuda(), penguinV::Image() ); } }; const FunctionRegistrator functionRegistrator; // The list of CUDA device functions on device side __global__ void absoluteDifferenceCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) > ( *in2X )) ? 
((*in1X) - (*in2X)) : ((*in2X) - (*in1X)); } } __global__ void bitwiseAndCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn1 = y * rowSizeIn1 + x; const uint32_t idIn2 = y * rowSizeIn2 + x; const uint32_t idOut = y * rowSizeOut + x; out[idOut] = in1[idIn1] & in2[idIn2]; } } __global__ void bitwiseOrCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn1 = y * rowSizeIn1 + x; const uint32_t idIn2 = y * rowSizeIn2 + x; const uint32_t idOut = y * rowSizeOut + x; out[idOut] = in1[idIn1] | in2[idIn2]; } } __global__ void bitwiseXorCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn1 = y * rowSizeIn1 + x; const uint32_t idIn2 = y * rowSizeIn2 + x; const uint32_t idOut = y * rowSizeOut + x; out[idOut] = in1[idIn1] ^ in2[idIn2]; } } __global__ void convertToGrayScaleCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * data = in + y * rowSizeIn + x * colorCount; const uint8_t * dataEnd = data + colorCount; uint32_t sum = 0; for ( ; data != dataEnd; ++data ) { sum += (*data); } const uint32_t id = y * rowSizeOut + x; out[id] = static_cast<uint8_t>(sum / colorCount); } } __global__ void convertToRgbCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint8_t colorCount, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * dataIn = in + y * rowSizeIn + x; uint8_t * dataOut = out + y * rowSizeOut + x * colorCount; const uint8_t * dataOutEnd = dataOut + colorCount; for ( ; dataOut != dataOutEnd; ++dataOut ) { (*dataOut) = (*dataIn); } } } __global__ void copyCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = in[y * rowSizeIn + x]; } } __global__ void extractChannelCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t colorCount, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) out[y * rowSizeOut + x] = in[y * rowSizeIn + x * colorCount]; } __global__ void fillCuda( uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint8_t value ) { const 
uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) data[y * rowSize + x] = value; } __global__ void flipCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, bool horizontal, bool vertical ) { const uint32_t inX = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t inY = blockDim.y * blockIdx.y + threadIdx.y; if ( inX < width && inY < height ) { const uint32_t outX = horizontal ? (width - 1 - inX) : inX; const uint32_t outY = vertical ? (height - 1 - inY) : inY; out[outY * rowSizeOut + outX] = in[inY * rowSizeIn + inX]; } } __global__ void histogramCuda( const uint8_t * data, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * histogram ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t id = y * rowSize + x; atomicAdd( &histogram[data[id]], 1 ); } } __global__ void invertCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = ~in[y * rowSizeIn + x]; } } __global__ void isEqualCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint32_t width, uint32_t height, uint32_t * isEqual ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t partsEqual = static_cast<uint32_t>( in1[y * rowSizeIn1 + x] == in2[y * rowSizeIn2 + x] ); atomicAnd( isEqual, partsEqual ); } } __global__ void lookupTableCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, uint8_t * table ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = table[in[y * rowSizeIn + x]]; } } __global__ void maximumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) > ( *in2X )) ? (*in1X) : (*in2X); } } __global__ void minimumCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) < (*in2X)) ? 
(*in1X) : (*in2X); } } __global__ void projectionProfileHorizontalCuda( const uint8_t * image, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * projection ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * imageX = image + y * rowSize + x; atomicAdd( &projection[x], (*imageX) ); } } __global__ void projectionProfileVerticalCuda( const uint8_t * image, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * projection ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * imageY = image + y * rowSize + x; atomicAdd( &projection[y], (*imageY) ); } } __global__ void rotateCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, float inXStart, float inYStart, uint32_t width, uint32_t height, float cosAngle, float sinAngle ) { uint32_t outX = blockDim.x * blockIdx.x + threadIdx.x; uint32_t outY = blockDim.y * blockIdx.y + threadIdx.y; // Only do something if this thread is for a valid pixel in the output if ( outX < width && outY < height ) { // Both input coordinates are shifted using the cosAngle, sinAngle, outX, and outY. The shift // comes from inverse rotating the horizontal and vertical iterations over the output. // Note that inverse rotation by X axis is [cos(angle), -sin(angle)], // and the inverse rotation by Y axis is [sin(angle), cos(angle)]. const float exactInX = inXStart + cosAngle * outX + sinAngle * outY; const float exactInY = inYStart - sinAngle * outX + cosAngle * outY; const int32_t inX = static_cast<int32_t>(exactInX); const int32_t inY = static_cast<int32_t>(exactInY); // Shift to the output pixel out = out + outY * rowSizeOut + outX; // Note that we will be taking an average with next pixels, so next pixels need to be in the image too if ( inX < 0 || inX >= width - 1 || inY < 0 || inY >= height - 1 ) { *out = 0; // We do not actually know what is beyond the image, so set value to 0 } else { // Shift to the input pixel in = in + inY * rowSizeIn + inX; // Now we use a bilinear approximation to find the pixel intensity value. That is, we take an // average of pixels (inX, inY), (inX + 1, inY), (inX, inY + 1), and (inX + 1, inY + 1). // We add an offset of 0.5 so that conversion to integer is done using rounding. 
const float probX = exactInX - inX; const float probY = exactInY - inY; const float mean = *in * (1 - probX) * (1 - probY) + *(in + 1) * probX * (1 - probY) + *(in + rowSizeIn) * (1 - probX) * probY + *(in + rowSizeIn + 1) * probX * probY + 0.5f; *out = static_cast<uint8_t>(mean); } } } __global__ void setPixelCuda( uint8_t * in, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t x, uint32_t y, uint8_t value ) { if ( x < width && y < height ) { in[y * rowSize + x] = value; } } __global__ void setPixelCuda( uint8_t * in, uint32_t rowSize, uint32_t width, uint32_t height, uint32_t * pointX, uint32_t * pointY, uint32_t pointSize, uint32_t value ) { const uint32_t idPoint = blockIdx.x * blockDim.x + threadIdx.x; if ( idPoint < pointSize) { const uint32_t x = pointX[idPoint]; const uint32_t y = pointY[idPoint]; if ( x < width && y < height ) { in[y * rowSize + x] = value; } } } __global__ void subtractCuda( const uint8_t * in1, uint32_t rowSizeIn1, const uint8_t * in2, uint32_t rowSizeIn2, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint8_t * in1X = in1 + y * rowSizeIn1 + x; const uint8_t * in2X = in2 + y * rowSizeIn2 + x; uint8_t * outX = out + y * rowSizeOut + x; (*outX) = ((*in1X) > ( *in2X )) ? ((*in1X) - (*in2X)) : 0; } } __global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, uint8_t threshold ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { out[y * rowSizeOut + x] = (in[y * rowSizeIn + x] < threshold) ? 0 : 255; } } __global__ void thresholdCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold ) { const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y; if ( x < width && y < height ) { const uint32_t idIn = y * rowSizeIn + x; out[y * rowSizeOut + x] = ((in[idIn] < minThreshold) || (in[idIn] > maxThreshold)) ? 
0 : 255; } } } namespace Image_Function_Cuda { Image AbsoluteDifference( const Image & in1, const Image & in2 ) { return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2 ); } void AbsoluteDifference( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, in2, out ); } Image AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::AbsoluteDifference( AbsoluteDifference, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void AbsoluteDifference( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( absoluteDifferenceCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image BitwiseAnd( const Image & in1, const Image & in2 ) { return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2 ); } void BitwiseAnd( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, in2, out ); } Image BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::BitwiseAnd( BitwiseAnd, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void BitwiseAnd( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( bitwiseAndCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image BitwiseOr( const Image & in1, const Image & in2 ) { return Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2 ); } void BitwiseOr( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::BitwiseOr( BitwiseOr, in1, in2, out ); } Image BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return 
Image_Function_Helper::BitwiseOr( BitwiseOr, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void BitwiseOr( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( bitwiseOrCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image BitwiseXor( const Image & in1, const Image & in2 ) { return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2 ); } void BitwiseXor( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::BitwiseXor( BitwiseXor, in1, in2, out ); } Image BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::BitwiseXor( BitwiseXor, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void BitwiseXor( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( bitwiseXorCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image ConvertToCuda( const Image & in ) { Image out = ImageCuda().generate( in.width(), in.height(), in.colorCount() ); ConvertToCuda( in, out ); return out; } void ConvertToCuda( const Image & in, Image & out ) { Image_Function::ValidateImageParameters( in ); Image_Function::ValidateImageParameters( out ); if ( in.width() != out.width() || in.height() != out.height() || in.colorCount() != out.colorCount() ) throw penguinVException( "Bad input parameters in image function" ); if ( in.alignment() == 1u || (in.rowSize() == in.width() * in.colorCount()) ) { const uint32_t size = in.rowSize() * in.height(); if ( !multiCuda::cudaSafeCheck( cudaMemcpy( out.data(), in.data(), size * sizeof( uint8_t ), cudaMemcpyHostToDevice ) ) ) throw penguinVException( "Cannot copy a memory to CUDA device" ); } else { if ( !multiCuda::cudaSafeCheck( cudaMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(), in.colorCount() * in.width(), in.height(), cudaMemcpyHostToDevice ) ) ) throw penguinVException( 
"Cannot copy a memory to CUDA device" ); } } Image ConvertFromCuda( const Image & in ) { Image out( in.width(), in.height(), in.colorCount(), 1u ); ConvertFromCuda( in, out ); return out; } void ConvertFromCuda(const Image & in, Image & out ) { Image_Function::ValidateImageParameters( in ); Image_Function::ValidateImageParameters( out ); if ( in.width() != out.width() || in.height() != out.height() || in.colorCount() != out.colorCount() ) throw penguinVException( "Bad input parameters in image function" ); if ( out.alignment() == 1u || (out.rowSize() == out.width() * out.colorCount()) ) { const uint32_t size = in.rowSize() * in.height(); if ( !multiCuda::cudaSafeCheck( cudaMemcpy( out.data(), in.data(), size, cudaMemcpyDeviceToHost ) ) ) throw penguinVException( "Cannot copy a memory from CUDA device" ); } else { if ( !multiCuda::cudaSafeCheck( cudaMemcpy2D( out.data(), out.rowSize(), in.data(), in.rowSize(), in.colorCount() * in.width(), in.height(), cudaMemcpyDeviceToHost ) ) ) throw penguinVException( "Cannot copy a memory to CUDA device" ); } } Image ConvertToGrayScale( const Image & in ) { return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in ); } void ConvertToGrayScale( const Image & in, Image & out ) { Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, out ); } Image ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::ConvertToGrayScale( ConvertToGrayScale, in, startXIn, startYIn, width, height ); } void ConvertToGrayScale( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( out ); if ( in.colorCount() == penguinV::GRAY_SCALE ) { Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); return; } const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t colorCount = in.colorCount(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( convertToGrayScaleCuda, width, height, inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height ); } Image ConvertToRgb( const Image & in ) { return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in ); } void ConvertToRgb( const Image & in, Image & out ) { Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, out ); } Image ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::ConvertToRgb( ConvertToRgb, in, startXIn, startYIn, width, height ); } void ConvertToRgb( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyRGBImage( out ); if ( in.colorCount() == penguinV::RGB ) { Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); return; } const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t colorCount = out.colorCount(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * 
colorCount; launchKernel2D( convertToRgbCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, colorCount, width, height ); } void Copy( const Image & in, Image & out ) { Image_Function::ValidateImageParameters( in, out ); out = in; } Image Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::Copy( Copy, in, startXIn, startYIn, width, height ); } void Copy( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in, out ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); width = width * colorCount; const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( copyCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height ); } Image ExtractChannel( const Image & in, uint8_t channelId ) { return Image_Function_Helper::ExtractChannel( ExtractChannel, in, channelId ); } void ExtractChannel( const Image & in, Image & out, uint8_t channelId ) { Image_Function_Helper::ExtractChannel( ExtractChannel, in, out, channelId ); } Image ExtractChannel( const Image & in, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t channelId ) { return Image_Function_Helper::ExtractChannel( ExtractChannel, in, x, y, width, height, channelId ); } void ExtractChannel( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, uint8_t channelId ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( out ); if ( channelId >= in.colorCount() ) throw penguinVException( "Channel ID for color image is greater than channel count in input image" ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t colorCount = in.colorCount(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount + channelId; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( extractChannelCuda, width, height, inY, rowSizeIn, colorCount, outY, rowSizeOut, width, height ); } void Fill( Image & image, uint8_t value ) { image.fill( value ); } void Fill( Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, uint8_t value ) { Image_Function::ValidateImageParameters( image, x, y, width, height ); Image_Function::VerifyGrayScaleImage( image ); const uint32_t rowSize = image.rowSize(); uint8_t * imageY = image.data() + y * rowSize + x; launchKernel2D( fillCuda, width, height, imageY, rowSize, width, height, value ); } Image Flip( const Image & in, bool horizontal, bool vertical ) { return Image_Function_Helper::Flip( Flip, in, horizontal, vertical ); } void Flip( const Image & in, Image & out, bool horizontal, bool vertical ) { Image_Function_Helper::Flip( Flip, in, out, horizontal, vertical ); } Image Flip( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, bool horizontal, bool vertical ) { return Image_Function_Helper::Flip( Flip, in, startXIn, startYIn, width, height, horizontal, vertical ); } void Flip( const Image & in, uint32_t 
startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, bool horizontal, bool vertical ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); if ( !horizontal && !vertical ) { Copy( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); } else { const uint8_t colorCount = in.colorCount(); width = width * colorCount; const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( flipCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, horizontal, vertical ); } } Image GammaCorrection( const Image & in, double a, double gamma ) { return Image_Function_Helper::GammaCorrection( GammaCorrection, in, a, gamma ); } void GammaCorrection( const Image & in, Image & out, double a, double gamma ) { Image_Function_Helper::GammaCorrection( GammaCorrection, in, out, a, gamma ); } Image GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, double a, double gamma ) { return Image_Function_Helper::GammaCorrection( GammaCorrection, in, startXIn, startYIn, width, height, a, gamma ); } void GammaCorrection( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, double a, double gamma ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); const std::vector<uint8_t> & value = Image_Function_Helper::GetGammaCorrectionLookupTable( a, gamma ); LookupTable( in, startXIn, startYIn, out, startXOut, startYOut, width, height, value ); } uint8_t GetThreshold( const std::vector < uint32_t > & histogram ) { return Image_Function_Helper::GetThreshold( histogram ); } std::vector < uint32_t > Histogram( const Image & image ) { return Image_Function_Helper::Histogram( Histogram, image ); } void Histogram( const Image & image, std::vector < uint32_t > & histogram ) { Image_Function_Helper::Histogram( Histogram, image, histogram ); } std::vector < uint32_t > Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height ) { return Image_Function_Helper::Histogram( Histogram, image, x, y, width, height ); } void Histogram( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, std::vector < uint32_t > & histogram ) { Image_Function::ValidateImageParameters( image, x, y, width, height ); Image_Function::VerifyGrayScaleImage( image ); histogram.resize( 256u ); std::fill( histogram.begin(), histogram.end(), 0u ); const uint32_t rowSize = image.rowSize(); const uint8_t * imageY = image.data() + y * rowSize + x; multiCuda::Array< uint32_t > tableCuda( histogram ); launchKernel2D( histogramCuda, width, height, imageY, rowSize, width, height, tableCuda.data() ); histogram = tableCuda.get(); } Image Invert( const Image & in ) { return Image_Function_Helper::Invert( Invert, in ); } void Invert( const Image & in, Image & out ) { Image_Function_Helper::Invert( Invert, in, out ); } Image Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height ) { return Image_Function_Helper::Invert( Invert, in, startXIn, startYIn, width, height 
); } void Invert( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in, out ); width = width * colorCount; const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( invertCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height ); } bool IsEqual( const Image & in1, const Image & in2 ) { Image_Function::ValidateImageParameters( in1, in2 ); return IsEqual( in1, 0, 0, in2, 0, 0, in1.width(), in1.height() ); } bool IsEqual( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2 ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; multiCuda::Type< uint32_t > result( 1 ); launchKernel2D( isEqualCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, width, height, result.data() ); return ( result.get() != 0 ); } Image LookupTable( const Image & in, const std::vector < uint8_t > & table ) { return Image_Function_Helper::LookupTable( LookupTable, in, table ); } void LookupTable( const Image & in, Image & out, const std::vector < uint8_t > & table ) { Image_Function_Helper::LookupTable( LookupTable, in, out, table ); } Image LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, const std::vector < uint8_t > & table ) { return Image_Function_Helper::LookupTable( LookupTable, in, startXIn, startYIn, width, height, table ); } void LookupTable( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, const std::vector < uint8_t > & table ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); if ( table.size() != 256u ) throw penguinVException( "Lookup table size is not equal to 256" ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in, out ); width = width * colorCount; const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; multiCuda::Array< uint8_t > tableCuda( table ); launchKernel2D( lookupTableCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, tableCuda.data() ); } Image Maximum( const Image & in1, const Image & in2 ) { return Image_Function_Helper::Maximum( Maximum, in1, in2 ); } void Maximum( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::Maximum( Maximum, in1, in2, out ); } Image Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, 
uint32_t width, uint32_t height ) { return Image_Function_Helper::Maximum( Maximum, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void Maximum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( maximumCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image Minimum( const Image & in1, const Image & in2 ) { return Image_Function_Helper::Minimum( Minimum, in1, in2 ); } void Minimum( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::Minimum( Minimum, in1, in2, out ); } Image Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::Minimum( Minimum, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void Minimum( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( minimumCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } std::vector < uint32_t > ProjectionProfile( const Image & image, bool horizontal ) { return Image_Function_Helper::ProjectionProfile( ProjectionProfile, image, horizontal ); } void ProjectionProfile( const Image & image, bool horizontal, std::vector < uint32_t > & projection ) { ProjectionProfile( image, 0, 0, image.width(), image.height(), horizontal, projection ); } std::vector < uint32_t > ProjectionProfile( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, bool horizontal ) { return Image_Function_Helper::ProjectionProfile( ProjectionProfile, image, x, y, width, height, horizontal ); } void ProjectionProfile( const Image & image, uint32_t x, uint32_t y, uint32_t width, uint32_t height, bool horizontal, std::vector<uint32_t> & projection ) { Image_Function::ValidateImageParameters( image, x, y, width, height ); const uint8_t colorCount = image.colorCount(); width *= colorCount; projection.resize( horizontal ? 
width : height ); std::fill( projection.begin(), projection.end(), 0u ); const uint32_t rowSize = image.rowSize(); const uint8_t * imageX = image.data() + y * rowSize + x * colorCount; multiCuda::Array< uint32_t > projectionCuda( projection ); launchKernel2D( ( horizontal ? projectionProfileHorizontalCuda : projectionProfileVerticalCuda ), width, height, imageX, rowSize, width, height, projectionCuda.data()); projection = projectionCuda.get(); } void Rotate( const Image & in, float centerXIn, float centerYIn, Image & out, float centerXOut, float centerYOut, float angle ) { Image_Function::ValidateImageParameters( in, out ); Image_Function::VerifyGrayScaleImage( in, out ); const float cosAngle = cos( angle ); const float sinAngle = sin( angle ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint32_t width = in.width(); const uint32_t height = in.height(); uint8_t const * inMem = in.data(); uint8_t * outMem = out.data(); // We iterate over the output array in the usual manner; we iterate over the // input using inverse rotation of this shift. Doing so, we start the input // iteration at the following positions: const float inXStart = -( cosAngle * centerXOut + sinAngle * centerYOut) + centerXIn; const float inYStart = -(-sinAngle * centerXOut + cosAngle * centerYOut) + centerYIn; launchKernel2D( rotateCuda, width, height, inMem, rowSizeIn, outMem, rowSizeOut, inXStart, inYStart, width, height, cosAngle, sinAngle ); } void SetPixel( Image & image, uint32_t x, uint32_t y, uint8_t value ) { Image_Function::ValidateImageParameters( image ); if ( x >= image.width() || y >= image.height() ) throw penguinVException( "Bad input parameters in image function" ); launchKernel1D( setPixelCuda, 1, image.data(), image.rowSize(), image.width(), image.height(), x, y, value ); } void SetPixel( Image & image, const std::vector<uint32_t> & X, const std::vector<uint32_t> & Y, uint8_t value ) { Image_Function::ValidateImageParameters( image ); if ( X.size() != Y.size() ) throw penguinVException( "Bad input parameters in image function" ); if ( X.size() > 0 ) { const uint32_t width = image.width(); const uint32_t height = image.height(); for ( size_t i = 0; i < X.size(); ++i ) { if ( X[i] >= width || Y[i] >= height ) throw penguinVException( "Bad input parameters in image function" ); } multiCuda::Array<uint32_t> pointX( X ); multiCuda::Array<uint32_t> pointY( Y ); launchKernel1D( setPixelCuda, static_cast<uint32_t>( X.size() ), image.data(), image.rowSize(), width, height, pointX.data(), pointY.data(), pointX.size(), value ); } } Image Subtract( const Image & in1, const Image & in2 ) { return Image_Function_Helper::Subtract( Subtract, in1, in2 ); } void Subtract( const Image & in1, const Image & in2, Image & out ) { Image_Function_Helper::Subtract( Subtract, in1, in2, out ); } Image Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, uint32_t width, uint32_t height ) { return Image_Function_Helper::Subtract( Subtract, in1, startX1, startY1, in2, startX2, startY2, width, height ); } void Subtract( const Image & in1, uint32_t startX1, uint32_t startY1, const Image & in2, uint32_t startX2, uint32_t startY2, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height ) { Image_Function::ValidateImageParameters( in1, startX1, startY1, in2, startX2, startY2, out, startXOut, startYOut, width, height ); const uint8_t colorCount = Image_Function::CheckCommonColorCount( in1, in2, out ); 
width = width * colorCount; const uint32_t rowSizeIn1 = in1.rowSize(); const uint32_t rowSizeIn2 = in2.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * in1Y = in1.data() + startY1 * rowSizeIn1 + startX1 * colorCount; const uint8_t * in2Y = in2.data() + startY2 * rowSizeIn2 + startX2 * colorCount; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut * colorCount; launchKernel2D( subtractCuda, width, height, in1Y, rowSizeIn1, in2Y, rowSizeIn2, outY, rowSizeOut, width, height ); } Image Threshold( const Image & in, uint8_t threshold ) { return Image_Function_Helper::Threshold( Threshold, in, threshold ); } void Threshold( const Image & in, Image & out, uint8_t threshold ) { Image_Function_Helper::Threshold( Threshold, in, out, threshold ); } Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t threshold ) { return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, threshold ); } void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, uint8_t threshold ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( thresholdCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, threshold ); } Image Threshold( const Image & in, uint8_t minThreshold, uint8_t maxThreshold ) { return Image_Function_Helper::Threshold( Threshold, in, minThreshold, maxThreshold ); } void Threshold( const Image & in, Image & out, uint8_t minThreshold, uint8_t maxThreshold ) { Image_Function_Helper::Threshold( Threshold, in, out, minThreshold, maxThreshold ); } Image Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold ) { return Image_Function_Helper::Threshold( Threshold, in, startXIn, startYIn, width, height, minThreshold, maxThreshold ); } void Threshold( const Image & in, uint32_t startXIn, uint32_t startYIn, Image & out, uint32_t startXOut, uint32_t startYOut, uint32_t width, uint32_t height, uint8_t minThreshold, uint8_t maxThreshold ) { Image_Function::ValidateImageParameters( in, startXIn, startYIn, out, startXOut, startYOut, width, height ); Image_Function::VerifyGrayScaleImage( in, out ); const uint32_t rowSizeIn = in.rowSize(); const uint32_t rowSizeOut = out.rowSize(); const uint8_t * inY = in.data() + startYIn * rowSizeIn + startXIn; uint8_t * outY = out.data() + startYOut * rowSizeOut + startXOut; launchKernel2D( thresholdCuda, width, height, inY, rowSizeIn, outY, rowSizeOut, width, height, minThreshold, maxThreshold ); } }
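Every kernel in the image-function file above follows the same convention: a 2D thread index is mapped to a pixel coordinate, guarded against the image bounds, and the pixel is addressed as y * rowSize + x. The launchKernel2D helper that dispatches these kernels lives in cuda_helper.cuh and is not shown here, so the sketch below only illustrates that indexing and grid-sizing pattern under an assumed 16x16 block shape; the names invertSketch and launchInvertSketch are hypothetical and are not part of the library.

// Minimal sketch of the 2D index/guard pattern and a matching launch; block size is assumed.
#include <cuda_runtime.h>
#include <cstdint>

__global__ void invertSketch( const uint8_t * in, uint32_t rowSizeIn,
                              uint8_t * out, uint32_t rowSizeOut,
                              uint32_t width, uint32_t height )
{
    const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
    const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;

    if ( x < width && y < height )
        out[y * rowSizeOut + x] = ~in[y * rowSizeIn + x]; // same per-pixel body as invertCuda above
}

void launchInvertSketch( const uint8_t * in, uint32_t rowSizeIn,
                         uint8_t * out, uint32_t rowSizeOut,
                         uint32_t width, uint32_t height )
{
    const dim3 threads( 16, 16 ); // assumed block shape, not the library's actual choice
    const dim3 blocks( ( width  + threads.x - 1 ) / threads.x,
                       ( height + threads.y - 1 ) / threads.y ); // round up so every pixel is covered

    invertSketch<<<blocks, threads>>>( in, rowSizeIn, out, rowSizeOut, width, height );
}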
eb7d0318e287ddb7b6b8b371221a8bba62c42795.hip
// !!! This is a file automatically generated by hipify!!! #pragma GCC diagnostic ignored "-Wshift-count-overflow" #pragma GCC diagnostic ignored "-Wshift-count-negative" #include <memory.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include "md5.h" #define F(x, y, z) (z ^ (x & (y ^ z))) #define G(x, y, z) (y ^ (z & (x ^ y))) #define H(x, y, z) (x ^ y ^ z) #define I(x, y, z) (y ^ (x | ~z)) #define ROTATE_LEFT(x, s) (x<<s | x>>(32-s)) #define STEP(f, a, b, c, d, x, t, s) ( \ a += f(b, c, d) + x + t, \ a = ROTATE_LEFT(a, s), \ a += b \ ) __device__ void md5_init(struct md5_context* ctx) { ctx->a = 0x67452301; ctx->b = 0xefcdab89; ctx->c = 0x98badcfe; ctx->d = 0x10325476; ctx->count[0] = 0; ctx->count[1] = 0; } __device__ uint8_t* md5_transform(struct md5_context* ctx, const void* data, uintmax_t size) { uint8_t* ptr = (uint8_t*) data; uint32_t a, b, c, d, aa, bb, cc, dd; #define GET(n) (ctx->block[(n)]) #define SET(n) (ctx->block[(n)] = \ ((uint32_t)ptr[(n)*4 + 0] << 0 ) \ | ((uint32_t)ptr[(n)*4 + 1] << 8 ) \ | ((uint32_t)ptr[(n)*4 + 2] << 16) \ | ((uint32_t)ptr[(n)*4 + 3] << 24) ) a = ctx->a; b = ctx->b; c = ctx->c; d = ctx->d; do { aa = a; bb = b; cc = c; dd = d; STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7); STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12); STEP(F, c, d, a, b, SET(2), 0x242070db, 17); STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22); STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7); STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12); STEP(F, c, d, a, b, SET(6), 0xa8304613, 17); STEP(F, b, c, d, a, SET(7), 0xfd469501, 22); STEP(F, a, b, c, d, SET(8), 0x698098d8, 7); STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12); STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17); STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22); STEP(F, a, b, c, d, SET(12), 0x6b901122, 7); STEP(F, d, a, b, c, SET(13), 0xfd987193, 12); STEP(F, c, d, a, b, SET(14), 0xa679438e, 17); STEP(F, b, c, d, a, SET(15), 0x49b40821, 22); STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5); STEP(G, d, a, b, c, GET(6), 0xc040b340, 9); STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14); STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20); STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5); STEP(G, d, a, b, c, GET(10), 0x02441453, 9); STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14); STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20); STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5); STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9); STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14); STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20); STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5); STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9); STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14); STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20); STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4); STEP(H, d, a, b, c, GET(8), 0x8771f681, 11); STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16); STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23); STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4); STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11); STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16); STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23); STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4); STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11); STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16); STEP(H, b, c, d, a, GET(6), 0x04881d05, 23); STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4); STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11); STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16); STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23); STEP(I, a, b, c, d, GET(0), 0xf4292244, 6); STEP(I, d, a, b, c, GET(7), 0x432aff97, 10); STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15); STEP(I, b, c, d, a, GET(5), 
0xfc93a039, 21); STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6); STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10); STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15); STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21); STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6); STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10); STEP(I, c, d, a, b, GET(6), 0xa3014314, 15); STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21); STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6); STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10); STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15); STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21); a += aa; b += bb; c += cc; d += dd; ptr += 64; } while (size -= 64); ctx->a = a; ctx->b = b; ctx->c = c; ctx->d = d; #undef GET #undef SET return ptr; } __device__ void md5_update(struct md5_context* ctx, const void* buffer, uint32_t buffer_size) { uint32_t saved_low = ctx->count[0]; uint32_t used; uint32_t free; if ((ctx->count[0] = ((saved_low+buffer_size) & 0x1fffffff)) < saved_low) { ctx->count[1]++; } ctx->count[1] += (uint32_t)(buffer_size>>29); used = saved_low & 0x3f; if (used) { free = 64 - used; if (buffer_size < free) { memcpy(&ctx->input[used], buffer, buffer_size); return; } memcpy(&ctx->input[used], buffer, free); buffer = (uint8_t*) buffer + free; buffer_size -= free; md5_transform(ctx, ctx->input, 64); } if (buffer_size >= 64) { buffer = md5_transform(ctx, buffer, buffer_size & ~(unsigned long)0x3f); } memcpy(ctx->input, buffer, buffer_size); } __device__ void md5_finalize(struct md5_context* ctx, struct md5_digest* digest) { uint32_t used = ctx->count[0] & 0x3f; ctx->input[used++] = 0x80; uint32_t free = 64 - used; if (free < 8) { memset(&ctx->input[used], 0, free); md5_transform(ctx, ctx->input, 64); used = 0; free = 64; } memset(&ctx->input[used], 0, free - 8); ctx->count[0] <<= 3; ctx->input[56] = (uint8_t)(ctx->count[0]); ctx->input[57] = (uint8_t)(ctx->count[0] >> 8); ctx->input[58] = (uint8_t)(ctx->count[0] >> 16); ctx->input[59] = (uint8_t)(ctx->count[0] >> 24); ctx->input[60] = (uint8_t)(ctx->count[1]); ctx->input[61] = (uint8_t)(ctx->count[1] >> 8); ctx->input[62] = (uint8_t)(ctx->count[1] >> 16); ctx->input[63] = (uint8_t)(ctx->count[1] >> 24); md5_transform(ctx, ctx->input, 64); digest->bytes[0] = (uint8_t)(ctx->a); digest->bytes[1] = (uint8_t)(ctx->a >> 8); digest->bytes[2] = (uint8_t)(ctx->a >> 16); digest->bytes[3] = (uint8_t)(ctx->a >> 24); digest->bytes[4] = (uint8_t)(ctx->b); digest->bytes[5] = (uint8_t)(ctx->b >> 8); digest->bytes[6] = (uint8_t)(ctx->b >> 16); digest->bytes[7] = (uint8_t)(ctx->b >> 24); digest->bytes[8] = (uint8_t)(ctx->c); digest->bytes[9] = (uint8_t)(ctx->c >> 8); digest->bytes[10] = (uint8_t)(ctx->c >> 16); digest->bytes[11] = (uint8_t)(ctx->c >> 24); digest->bytes[12] = (uint8_t)(ctx->d); digest->bytes[13] = (uint8_t)(ctx->d >> 8); digest->bytes[14] = (uint8_t)(ctx->d >> 16); digest->bytes[15] = (uint8_t)(ctx->d >> 24); } __global__ void md5(const char* input, const uint32_t input_len, unsigned char* result) { struct md5_context context; struct md5_digest digest; md5_init(&context); md5_update(&context, input, input_len); md5_finalize(&context, &digest); for (int i = 0; i < sizeof(digest); i++){ result[i] = (unsigned char)digest.bytes[i]; } result[sizeof(digest)] = '\0'; }
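In md5_finalize above, the running byte count kept in count[0]/count[1] is converted to a bit count (count[0] <<= 3) and written little-endian into bytes 56..63 of the final block. The small host-side illustration below reproduces that trailer encoding for a whole 64-bit length; encodeLengthTrailer is a hypothetical helper written only to make the byte layout concrete, not part of md5.h.

// Worked illustration of the length trailer written by md5_finalize (hypothetical helper).
#include <cstdint>
#include <cstdio>

static void encodeLengthTrailer( uint64_t messageBytes, uint8_t trailer[8] )
{
    const uint64_t bits = messageBytes * 8;                       // same effect as count[0] <<= 3 plus the carry held in count[1]
    for ( int i = 0; i < 8; ++i )
        trailer[i] = static_cast<uint8_t>( bits >> ( 8 * i ) );   // little-endian, as in input[56..63]
}

int main()
{
    uint8_t trailer[8];
    encodeLengthTrailer( 3, trailer );            // e.g. a 3-byte message such as "abc"
    for ( int i = 0; i < 8; ++i )
        std::printf( "%02x ", trailer[i] );       // prints: 18 00 00 00 00 00 00 00  (24 bits)
    std::printf( "\n" );
    return 0;
}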
eb7d0318e287ddb7b6b8b371221a8bba62c42795.cu
#pragma GCC diagnostic ignored "-Wshift-count-overflow" #pragma GCC diagnostic ignored "-Wshift-count-negative" #include <memory.h> #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include "md5.h" #define F(x, y, z) (z ^ (x & (y ^ z))) #define G(x, y, z) (y ^ (z & (x ^ y))) #define H(x, y, z) (x ^ y ^ z) #define I(x, y, z) (y ^ (x | ~z)) #define ROTATE_LEFT(x, s) (x<<s | x>>(32-s)) #define STEP(f, a, b, c, d, x, t, s) ( \ a += f(b, c, d) + x + t, \ a = ROTATE_LEFT(a, s), \ a += b \ ) __device__ void md5_init(struct md5_context* ctx) { ctx->a = 0x67452301; ctx->b = 0xefcdab89; ctx->c = 0x98badcfe; ctx->d = 0x10325476; ctx->count[0] = 0; ctx->count[1] = 0; } __device__ uint8_t* md5_transform(struct md5_context* ctx, const void* data, uintmax_t size) { uint8_t* ptr = (uint8_t*) data; uint32_t a, b, c, d, aa, bb, cc, dd; #define GET(n) (ctx->block[(n)]) #define SET(n) (ctx->block[(n)] = \ ((uint32_t)ptr[(n)*4 + 0] << 0 ) \ | ((uint32_t)ptr[(n)*4 + 1] << 8 ) \ | ((uint32_t)ptr[(n)*4 + 2] << 16) \ | ((uint32_t)ptr[(n)*4 + 3] << 24) ) a = ctx->a; b = ctx->b; c = ctx->c; d = ctx->d; do { aa = a; bb = b; cc = c; dd = d; STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7); STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12); STEP(F, c, d, a, b, SET(2), 0x242070db, 17); STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22); STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7); STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12); STEP(F, c, d, a, b, SET(6), 0xa8304613, 17); STEP(F, b, c, d, a, SET(7), 0xfd469501, 22); STEP(F, a, b, c, d, SET(8), 0x698098d8, 7); STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12); STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17); STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22); STEP(F, a, b, c, d, SET(12), 0x6b901122, 7); STEP(F, d, a, b, c, SET(13), 0xfd987193, 12); STEP(F, c, d, a, b, SET(14), 0xa679438e, 17); STEP(F, b, c, d, a, SET(15), 0x49b40821, 22); STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5); STEP(G, d, a, b, c, GET(6), 0xc040b340, 9); STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14); STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20); STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5); STEP(G, d, a, b, c, GET(10), 0x02441453, 9); STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14); STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20); STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5); STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9); STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14); STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20); STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5); STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9); STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14); STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20); STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4); STEP(H, d, a, b, c, GET(8), 0x8771f681, 11); STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16); STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23); STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4); STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11); STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16); STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23); STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4); STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11); STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16); STEP(H, b, c, d, a, GET(6), 0x04881d05, 23); STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4); STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11); STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16); STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23); STEP(I, a, b, c, d, GET(0), 0xf4292244, 6); STEP(I, d, a, b, c, GET(7), 0x432aff97, 10); STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15); STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21); STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6); STEP(I, 
d, a, b, c, GET(3), 0x8f0ccc92, 10); STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15); STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21); STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6); STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10); STEP(I, c, d, a, b, GET(6), 0xa3014314, 15); STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21); STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6); STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10); STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15); STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21); a += aa; b += bb; c += cc; d += dd; ptr += 64; } while (size -= 64); ctx->a = a; ctx->b = b; ctx->c = c; ctx->d = d; #undef GET #undef SET return ptr; } __device__ void md5_update(struct md5_context* ctx, const void* buffer, uint32_t buffer_size) { uint32_t saved_low = ctx->count[0]; uint32_t used; uint32_t free; if ((ctx->count[0] = ((saved_low+buffer_size) & 0x1fffffff)) < saved_low) { ctx->count[1]++; } ctx->count[1] += (uint32_t)(buffer_size>>29); used = saved_low & 0x3f; if (used) { free = 64 - used; if (buffer_size < free) { memcpy(&ctx->input[used], buffer, buffer_size); return; } memcpy(&ctx->input[used], buffer, free); buffer = (uint8_t*) buffer + free; buffer_size -= free; md5_transform(ctx, ctx->input, 64); } if (buffer_size >= 64) { buffer = md5_transform(ctx, buffer, buffer_size & ~(unsigned long)0x3f); } memcpy(ctx->input, buffer, buffer_size); } __device__ void md5_finalize(struct md5_context* ctx, struct md5_digest* digest) { uint32_t used = ctx->count[0] & 0x3f; ctx->input[used++] = 0x80; uint32_t free = 64 - used; if (free < 8) { memset(&ctx->input[used], 0, free); md5_transform(ctx, ctx->input, 64); used = 0; free = 64; } memset(&ctx->input[used], 0, free - 8); ctx->count[0] <<= 3; ctx->input[56] = (uint8_t)(ctx->count[0]); ctx->input[57] = (uint8_t)(ctx->count[0] >> 8); ctx->input[58] = (uint8_t)(ctx->count[0] >> 16); ctx->input[59] = (uint8_t)(ctx->count[0] >> 24); ctx->input[60] = (uint8_t)(ctx->count[1]); ctx->input[61] = (uint8_t)(ctx->count[1] >> 8); ctx->input[62] = (uint8_t)(ctx->count[1] >> 16); ctx->input[63] = (uint8_t)(ctx->count[1] >> 24); md5_transform(ctx, ctx->input, 64); digest->bytes[0] = (uint8_t)(ctx->a); digest->bytes[1] = (uint8_t)(ctx->a >> 8); digest->bytes[2] = (uint8_t)(ctx->a >> 16); digest->bytes[3] = (uint8_t)(ctx->a >> 24); digest->bytes[4] = (uint8_t)(ctx->b); digest->bytes[5] = (uint8_t)(ctx->b >> 8); digest->bytes[6] = (uint8_t)(ctx->b >> 16); digest->bytes[7] = (uint8_t)(ctx->b >> 24); digest->bytes[8] = (uint8_t)(ctx->c); digest->bytes[9] = (uint8_t)(ctx->c >> 8); digest->bytes[10] = (uint8_t)(ctx->c >> 16); digest->bytes[11] = (uint8_t)(ctx->c >> 24); digest->bytes[12] = (uint8_t)(ctx->d); digest->bytes[13] = (uint8_t)(ctx->d >> 8); digest->bytes[14] = (uint8_t)(ctx->d >> 16); digest->bytes[15] = (uint8_t)(ctx->d >> 24); } __global__ void md5(const char* input, const uint32_t input_len, unsigned char* result) { struct md5_context context; struct md5_digest digest; md5_init(&context); md5_update(&context, input, input_len); md5_finalize(&context, &digest); for (int i = 0; i < sizeof(digest); i++){ result[i] = (unsigned char)digest.bytes[i]; } result[sizeof(digest)] = '\0'; }
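The file above only defines device code; the md5 __global__ kernel hashes its entire input inside a single thread and additionally writes a terminating '\0' after the 16 digest bytes. The sketch below is one possible host-side invocation and is an assumption for illustration: the main() function, buffer sizes and error handling are not part of the original file, and md5.h (included above) is expected to define md5_context and md5_digest.

// Hedged host-side usage sketch for the md5 kernel defined above.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    const char message[] = "abc";
    const uint32_t length = static_cast<uint32_t>( std::strlen( message ) );

    char * d_input = nullptr;
    unsigned char * d_result = nullptr;
    cudaMalloc( (void**)&d_input, length );
    cudaMalloc( (void**)&d_result, 16 + 1 );         // 16 digest bytes plus the '\0' the kernel appends

    cudaMemcpy( d_input, message, length, cudaMemcpyHostToDevice );
    md5<<<1, 1>>>( d_input, length, d_result );      // one thread computes the whole hash

    unsigned char digest[16 + 1] = { 0 };
    cudaMemcpy( digest, d_result, sizeof( digest ), cudaMemcpyDeviceToHost );

    for ( int i = 0; i < 16; ++i )
        std::printf( "%02x", digest[i] );            // standard test vector for "abc": 900150983cd24fb0d6963f7d28e17f72
    std::printf( "\n" );

    cudaFree( d_input );
    cudaFree( d_result );
    return 0;
}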
39b26eb4d17b38c7770d59c10629103319b9486f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * \brief The Caffe layer that implements the CRF-RNN described in the paper: * Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015. * * \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su. * \version 1.0 * \date 2015 * \copyright Torr Vision Group, University of Oxford. * \details If you use this code, please consider citing the paper: * Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du, * Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015. * * For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision. */ #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/layers/multi_stage_meanfield_layer.hpp" namespace caffe { // Avoid divergence by uncoalescing access template <typename Dtype> __global__ void computeBilateralKernel(const int num_pixels_, const Dtype* const rgb_blob, const int width_, const int height_, const int channels_, float theta_alpha_, float theta_beta_, const int n, float* const output_kernel) { int offset = ((n * channels_ ) * height_) * width_ ; CUDA_KERNEL_LOOP(p, num_pixels_) { output_kernel[5 * p] = (float)(p % width_) / theta_alpha_; output_kernel[5 * p + 1] = (float)(p / width_) / theta_alpha_; const Dtype * const rgb_data_start = rgb_blob + offset; output_kernel[5 * p + 2] = (float)(rgb_data_start[p] / theta_beta_); output_kernel[5 * p + 3] = (float)((rgb_data_start + num_pixels_)[p] / theta_beta_); output_kernel[5 * p + 4] = (float)((rgb_data_start + num_pixels_ * 2)[p] / theta_beta_); } } template <typename Dtype> __global__ void computeNorm(Dtype* norm_output_data, int num_pixels){ CUDA_KERNEL_LOOP(i, num_pixels) { norm_output_data[i] = 1.f / (norm_output_data[i] + 1e-20f); } } /** * Performs filter-based mean field inference given the image and unaries. * * bottom[0] - Unary terms * bottom[1] - Softmax input/Output from the previous iteration (a copy of the unary terms if this is the first stage). * bottom[2] - RGB images * * top[0] - Output of the mean field inference (not normalized). */ template <typename Dtype> void MultiStageMeanfieldLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if(init_cpu) LOG(FATAL) << ("You initialize your network on CPU, please initialize it on GPU."); const Dtype* bottom_data = bottom[2]->gpu_data() ; split_layer_bottom_vec_[0] = bottom[0]; split_layer_->Forward(split_layer_bottom_vec_, split_layer_top_vec_); // Initialize the bilateral lattices. bilateral_lattices_.resize(num_); for (int n = 0; n < num_; ++n) { hipLaunchKernelGGL(( computeBilateralKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_pixels_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_pixels_, bottom_data, width_, height_, channels_, theta_alpha_, theta_beta_, n, bilateral_kernel_buffer_); CUDA_POST_KERNEL_CHECK; bilateral_lattices_[n].reset(new ModifiedPermutohedral()); bilateral_lattices_[n]->init(bilateral_kernel_buffer_, 5, width_, height_); // Calculate bilateral filter normalization factors. 
Dtype* norm_output_data = bilateral_norms_.mutable_gpu_data() + bilateral_norms_.offset(n); bilateral_lattices_[n]->compute(norm_output_data, norm_feed_, 1); hipLaunchKernelGGL(( computeNorm<Dtype>), dim3(CAFFE_GET_BLOCKS(num_pixels_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, norm_output_data, num_pixels_); CUDA_POST_KERNEL_CHECK; } for (int i = 0; i < num_iterations_; ++i) { meanfield_iterations_[i]->PrePass(this->blobs_, &bilateral_lattices_, &bilateral_norms_); meanfield_iterations_[i]->Forward_gpu(); } } /** * Backprop through filter-based mean field inference. */ template<typename Dtype> void MultiStageMeanfieldLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if(init_cpu) LOG(FATAL) << ("You initialize your network on CPU, please initialize it on GPU."); for (int i = (num_iterations_ - 1); i >= 0; --i) { meanfield_iterations_[i]->Backward_gpu(); } vector<bool> split_layer_propagate_down(1, true); split_layer_->Backward(split_layer_top_vec_, split_layer_propagate_down, split_layer_bottom_vec_); // Accumulate diffs from mean field iterations. for (int blob_id = 0; blob_id < this->blobs_.size(); ++blob_id) { Blob<Dtype>* cur_blob = this->blobs_[blob_id].get(); if (this->param_propagate_down_[blob_id]) { caffe_gpu_set(cur_blob->count(), Dtype(0), cur_blob->mutable_gpu_diff()); for (int i = 0; i < num_iterations_; ++i) { const Dtype* diffs_to_add = meanfield_iterations_[i]->blobs()[blob_id]->gpu_diff(); caffe_gpu_axpy(cur_blob->count(), Dtype(1.), diffs_to_add, cur_blob->mutable_gpu_diff()); } } } } INSTANTIATE_LAYER_GPU_FUNCS(MultiStageMeanfieldLayer); } // namespace caffe
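computeBilateralKernel above builds a 5-float feature vector per pixel from a planar (channel-major) image: two spatial coordinates scaled by theta_alpha and three colour channels scaled by theta_beta. The plain host-side reference below restates that layout outside the CUDA_KERNEL_LOOP machinery; bilateralFeaturesCpu is a hypothetical function for illustration only (single image, float data) and is not part of the layer.

// CPU reference for the per-pixel bilateral feature layout used by computeBilateralKernel (hypothetical).
static void bilateralFeaturesCpu( const float * rgb,        // planar image: first channel plane, then second, then third
                                  int width, int height,
                                  float theta_alpha, float theta_beta,
                                  float * output_kernel )   // 5 floats per pixel
{
    const int num_pixels = width * height;
    for ( int p = 0; p < num_pixels; ++p )
    {
        output_kernel[5 * p + 0] = static_cast<float>( p % width ) / theta_alpha; // x position
        output_kernel[5 * p + 1] = static_cast<float>( p / width ) / theta_alpha; // y position
        output_kernel[5 * p + 2] = rgb[p] / theta_beta;                    // first channel
        output_kernel[5 * p + 3] = rgb[p + num_pixels] / theta_beta;       // second channel
        output_kernel[5 * p + 4] = rgb[p + 2 * num_pixels] / theta_beta;   // third channel
    }
}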
39b26eb4d17b38c7770d59c10629103319b9486f.cu
/*! * \brief The Caffe layer that implements the CRF-RNN described in the paper: * Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015. * * \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su. * \version 1.0 * \date 2015 * \copyright Torr Vision Group, University of Oxford. * \details If you use this code, please consider citing the paper: * Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du, * Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015. * * For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision. */ #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/layers/multi_stage_meanfield_layer.hpp" namespace caffe { // Avoid divergence by uncoalescing access template <typename Dtype> __global__ void computeBilateralKernel(const int num_pixels_, const Dtype* const rgb_blob, const int width_, const int height_, const int channels_, float theta_alpha_, float theta_beta_, const int n, float* const output_kernel) { int offset = ((n * channels_ ) * height_) * width_ ; CUDA_KERNEL_LOOP(p, num_pixels_) { output_kernel[5 * p] = (float)(p % width_) / theta_alpha_; output_kernel[5 * p + 1] = (float)(p / width_) / theta_alpha_; const Dtype * const rgb_data_start = rgb_blob + offset; output_kernel[5 * p + 2] = (float)(rgb_data_start[p] / theta_beta_); output_kernel[5 * p + 3] = (float)((rgb_data_start + num_pixels_)[p] / theta_beta_); output_kernel[5 * p + 4] = (float)((rgb_data_start + num_pixels_ * 2)[p] / theta_beta_); } } template <typename Dtype> __global__ void computeNorm(Dtype* norm_output_data, int num_pixels){ CUDA_KERNEL_LOOP(i, num_pixels) { norm_output_data[i] = 1.f / (norm_output_data[i] + 1e-20f); } } /** * Performs filter-based mean field inference given the image and unaries. * * bottom[0] - Unary terms * bottom[1] - Softmax input/Output from the previous iteration (a copy of the unary terms if this is the first stage). * bottom[2] - RGB images * * top[0] - Output of the mean field inference (not normalized). */ template <typename Dtype> void MultiStageMeanfieldLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if(init_cpu) LOG(FATAL) << ("You initialize your network on CPU, please initialize it on GPU."); const Dtype* bottom_data = bottom[2]->gpu_data() ; split_layer_bottom_vec_[0] = bottom[0]; split_layer_->Forward(split_layer_bottom_vec_, split_layer_top_vec_); // Initialize the bilateral lattices. bilateral_lattices_.resize(num_); for (int n = 0; n < num_; ++n) { computeBilateralKernel<Dtype><<<CAFFE_GET_BLOCKS(num_pixels_), CAFFE_CUDA_NUM_THREADS>>>( num_pixels_, bottom_data, width_, height_, channels_, theta_alpha_, theta_beta_, n, bilateral_kernel_buffer_); CUDA_POST_KERNEL_CHECK; bilateral_lattices_[n].reset(new ModifiedPermutohedral()); bilateral_lattices_[n]->init(bilateral_kernel_buffer_, 5, width_, height_); // Calculate bilateral filter normalization factors. 
Dtype* norm_output_data = bilateral_norms_.mutable_gpu_data() + bilateral_norms_.offset(n); bilateral_lattices_[n]->compute(norm_output_data, norm_feed_, 1); computeNorm<Dtype><<<CAFFE_GET_BLOCKS(num_pixels_), CAFFE_CUDA_NUM_THREADS>>>(norm_output_data, num_pixels_); CUDA_POST_KERNEL_CHECK; } for (int i = 0; i < num_iterations_; ++i) { meanfield_iterations_[i]->PrePass(this->blobs_, &bilateral_lattices_, &bilateral_norms_); meanfield_iterations_[i]->Forward_gpu(); } } /** * Backprop through filter-based mean field inference. */ template<typename Dtype> void MultiStageMeanfieldLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if(init_cpu) LOG(FATAL) << ("You initialize your network on CPU, please initialize it on GPU."); for (int i = (num_iterations_ - 1); i >= 0; --i) { meanfield_iterations_[i]->Backward_gpu(); } vector<bool> split_layer_propagate_down(1, true); split_layer_->Backward(split_layer_top_vec_, split_layer_propagate_down, split_layer_bottom_vec_); // Accumulate diffs from mean field iterations. for (int blob_id = 0; blob_id < this->blobs_.size(); ++blob_id) { Blob<Dtype>* cur_blob = this->blobs_[blob_id].get(); if (this->param_propagate_down_[blob_id]) { caffe_gpu_set(cur_blob->count(), Dtype(0), cur_blob->mutable_gpu_diff()); for (int i = 0; i < num_iterations_; ++i) { const Dtype* diffs_to_add = meanfield_iterations_[i]->blobs()[blob_id]->gpu_diff(); caffe_gpu_axpy(cur_blob->count(), Dtype(1.), diffs_to_add, cur_blob->mutable_gpu_diff()); } } } } INSTANTIATE_LAYER_GPU_FUNCS(MultiStageMeanfieldLayer); } // namespace caffe
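// A minimal host-side sketch (not part of the CRF-RNN sources above) of the 5-D
// feature layout that computeBilateralKernel writes for the ModifiedPermutohedral
// filter: (x/theta_alpha, y/theta_alpha, r/theta_beta, g/theta_beta, b/theta_beta)
// per pixel of a single image. The helper name is a hypothetical choice for spot
// checking the kernel output; the planar R/G/B indexing mirrors the kernel itself.
#include <cstddef>
#include <vector>

static std::vector<float> bilateral_features_cpu(const float* rgb,  // planar: R plane, then G, then B
                                                 int width, int height,
                                                 float theta_alpha, float theta_beta)
{
    const int num_pixels = width * height;
    std::vector<float> feat(5 * static_cast<std::size_t>(num_pixels));
    for (int p = 0; p < num_pixels; ++p) {
        feat[5 * p]     = static_cast<float>(p % width) / theta_alpha;  // pixel x coordinate
        feat[5 * p + 1] = static_cast<float>(p / width) / theta_alpha;  // pixel y coordinate
        feat[5 * p + 2] = rgb[p] / theta_beta;                          // red
        feat[5 * p + 3] = rgb[p + num_pixels] / theta_beta;             // green
        feat[5 * p + 4] = rgb[p + 2 * num_pixels] / theta_beta;         // blue
    }
    return feat;
}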
02647803269d859b1382abb1d28138d200ddcdd9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> #include <random> // User Defined Header Files #include "cudaDeviceBuffer.h" #include "Parser.h" #include "kernels_hip.cuh" #include "VTKWriter.h" // Constants # define pi 3.14159265358979323846 // User Defined Functions real_t RestDistance(const u_int nump,const real_t volumne) { real_t n = nump / volumne ; real_t a = std::cbrt(3.0 / (4 * pi * n )) ; return 0.893 * a ; } void MaxWellBoltzmannVel(cudaDeviceBuffer<real_t> &vel){ u_int num_particles = vel.size()/ 3 ; real_t a1,a2,a3,r,s; for (u_int p = 0; p < num_particles; ++p) { u_int pindex = p * 3 ; do { a1 = 2.0 * std::rand() / ((double)RAND_MAX + 1.0 ) - 1.0 ; a2 = 2.0 * std::rand() / ((double)RAND_MAX + 1.0 ) - 1.0 ; a3 = 2.0 * std::rand() / ((double)RAND_MAX + 1.0 ) - 1.0 ; r = (a1 * a1) + (a2 * a2) + (a3 * a3) ; } while (r >= 1.0); s = std::sqrt(-2.0 * log(r)/r) ; vel[pindex] = a1 *s ; vel[pindex+1] = a2 *s ; vel[pindex+2] = a3 *s ; } } int main(int argc, char **argv){ // Read from the file. Parser p(argv[1]); p.readParameters(); p.readInputConfiguration(); // Simulation Parameters real_t time_end = std::stod(p.params["time_end"]) ; real_t timestep_length = std::stod(p.params["timestep_length"]) ; u_int vtk_out_freq = std::stol(p.params["vtk_out_freq"]) ; u_int threads_per_blocks = std::stol(p.params["cl_workgroup_1dsize"]) ; std::string vtk_name = p.params["vtk_out_name_base"] ; real_t xmin = std::stod(p.params["x_min"]); // We will assume cubic domain real_t xmax = std::stod(p.params["x_max"]); real_t ymin = std::stod(p.params["y_min"]); real_t ymax = std::stod(p.params["y_max"]); real_t zmin = std::stod(p.params["z_min"]); real_t zmax = std::stod(p.params["z_max"]); real_t re = std::stod(p.params["re"]) ; real_t nu = std::stod(p.params["nu"]); real_t k = std::stod(p.params["k"]); real_t restpressure = std::stod(p.params["p0"]); real_t restdensity = std::stod(p.params["rho"]); real_t stiffness = std::stod(p.params["stiffness"]); real_t damping = std::stod(p.params["damping"]); real_t d = std::stod(p.params["d"]); real_t sigma = std::stod(p.params["sigma"]); // Computing the Cell length const real_t celllength = re ; const u_int numcellx = (xmax - xmin) / celllength ; // Number of particles const u_int numparticles = p.num_particles ; printf("Rest distance is %f\n",d); // Number of Cell const u_int numcells = numcellx * numcellx * numcellx ; // Buffers cudaDeviceBuffer<real_t> mass(numparticles,"Scalar") ; cudaDeviceBuffer<real_t> position(numparticles,"Vector") ; cudaDeviceBuffer<real_t> density(numparticles,"Scalar") ; cudaDeviceBuffer<real_t> pressure(numparticles,"Scalar") ; cudaDeviceBuffer<real_t> velocity(numparticles,"Vector") ; cudaDeviceBuffer<real_t> forceold(numparticles,"Vector") ; cudaDeviceBuffer<real_t> forcenew(numparticles,"Vector") ; cudaDeviceBuffer<int> cell_list(numcells,"Scalar"); cudaDeviceBuffer<int> particle_list(numparticles,"Scalar"); cudaDeviceBuffer<real_t> const_args(9,"Scalar"); cudaDeviceBuffer<u_int> num_cells(3,"Scalar"); // Fill the buffers from Initial list p.fillBuffers(mass,velocity,position) ; // Velocity according to Maxwell Boltzmann distribution //Filling in the host data for the constant arguments const_args[0] = xmin; const_args[1] = xmax; const_args[2] = ymin; const_args[3] = ymax; const_args[4] = zmin; const_args[5] = zmax; const_args[6] = celllength; const_args[7] = celllength; const_args[8] = 
celllength; //Number of cells per dimension assuming cubic domain num_cells[0] = numcellx; num_cells[1] = numcellx; num_cells[2] = numcellx; // Allocating memory on Device mass.allocateOnDevice(); position.allocateOnDevice(); velocity.allocateOnDevice(); pressure.allocateOnDevice() ; density.allocateOnDevice() ; forceold.allocateOnDevice(); forcenew.allocateOnDevice(); cell_list.allocateOnDevice(); particle_list.allocateOnDevice(); const_args.allocateOnDevice(); num_cells.allocateOnDevice(); //Copy to Device mass.copyToDevice(); position.copyToDevice(); velocity.copyToDevice(); pressure.copyToDevice(); density.copyToDevice() ; forceold.copyToDevice(); forcenew.copyToDevice(); cell_list.copyToDevice(); particle_list.copyToDevice(); const_args.copyToDevice(); num_cells.copyToDevice(); // Calculate the number of blocks to launch u_int blocks_p,blocks_c,threads_p,threads_c; threads_p = threads_per_blocks; threads_c = threads_per_blocks; if(numparticles%threads_per_blocks == 0){ blocks_p = numparticles/threads_p; } else{ blocks_p = numparticles/threads_p+1; } if(numcells%threads_per_blocks){ blocks_c = numcells/threads_c; } else{ blocks_c = numparticles/threads_c+1; } VTKWriter writer(vtk_name); { hipLaunchKernelGGL(( InitializeCellList), dim3(blocks_c),dim3(threads_c), 0, 0, cell_list.devicePtr,numcells) ; hipLaunchKernelGGL(( InitializePartList), dim3(blocks_p),dim3(threads_p), 0, 0, particle_list.devicePtr,numparticles) ; hipLaunchKernelGGL(( UpdateList), dim3(blocks_p),dim3(threads_p), 0, 0, cell_list.devicePtr,particle_list.devicePtr, position.devicePtr,celllength,numparticles,numcellx) ; hipLaunchKernelGGL(( CalculateDensity), dim3(blocks_p),dim3(threads_p) , 0, 0, mass.devicePtr,cell_list.devicePtr,particle_list.devicePtr, density.devicePtr,position.devicePtr,re,numparticles,celllength,numcellx) ; hipLaunchKernelGGL(( CalculatePressure), dim3(blocks_p),dim3(threads_p), 0, 0, pressure.devicePtr,density.devicePtr, restpressure,restdensity,k,numparticles) ; hipLaunchKernelGGL(( CalculateForce), dim3(blocks_p),dim3(threads_p), 0, 0, velocity.devicePtr,forcenew.devicePtr,cell_list.devicePtr, particle_list.devicePtr,mass.devicePtr,pressure.devicePtr,density.devicePtr, position.devicePtr,numparticles,celllength,numcellx,re,nu,sigma) ; //BoundarySweep<<<blocks_p,threads_p>>>(forcenew.devicePtr,density.devicePtr,mass.devicePtr,timestep_length,position.devicePtr,d,numparticles,re,const_args[1],1); hipLaunchKernelGGL(( BoundarySweepSD), dim3(blocks_p),dim3(threads_p), 0, 0, forcenew.devicePtr,density.devicePtr,position.devicePtr,mass.devicePtr,\ d,numparticles,velocity.devicePtr,re,xmax,stiffness,damping); int iter=0; for (real_t t = 0.0;t<=time_end; t+= timestep_length) { if(iter % vtk_out_freq == 0){ // copy to host back forcenew.copyToHost(); forceold.copyToHost(); position.copyToHost(); velocity.copyToHost(); writer.writeVTKOutput(mass,position,velocity,numparticles); } hipLaunchKernelGGL(( positionUpdate), dim3(blocks_p),dim3(threads_p), 0, 0, forcenew.devicePtr,position.devicePtr,velocity.devicePtr, mass.devicePtr,numparticles,timestep_length) ; hipLaunchKernelGGL(( copyForces), dim3(blocks_p),dim3(threads_p), 0, 0, forceold.devicePtr,forcenew.devicePtr,numparticles); hipLaunchKernelGGL(( InitializeCellList), dim3(blocks_c),dim3(threads_c), 0, 0, cell_list.devicePtr,numcells) ; hipLaunchKernelGGL(( InitializePartList), dim3(blocks_p),dim3(threads_p), 0, 0, particle_list.devicePtr,numparticles) ; hipLaunchKernelGGL(( UpdateList), dim3(blocks_p),dim3(threads_p), 0, 0, 
cell_list.devicePtr,particle_list.devicePtr, position.devicePtr,celllength,numparticles,numcellx) ; hipLaunchKernelGGL(( CalculateDensity), dim3(blocks_p),dim3(threads_p) , 0, 0, mass.devicePtr,cell_list.devicePtr,particle_list.devicePtr, density.devicePtr,position.devicePtr,re,numparticles,celllength,numcellx) ; hipLaunchKernelGGL(( CalculatePressure), dim3(blocks_p),dim3(threads_p), 0, 0, pressure.devicePtr,density.devicePtr, restpressure,restdensity,k,numparticles) ; hipLaunchKernelGGL(( CalculateForce), dim3(blocks_p),dim3(threads_p), 0, 0, velocity.devicePtr,forcenew.devicePtr,cell_list.devicePtr, particle_list.devicePtr,mass.devicePtr,pressure.devicePtr, density.devicePtr,position.devicePtr,numparticles,celllength,numcellx,re,nu,sigma) ; // BoundarySweep<<<blocks_p,threads_p>>> (forcenew.devicePtr,density.devicePtr,mass.devicePtr,timestep_length, // position.devicePtr,d,numparticles,re,const_args[0],1); hipLaunchKernelGGL(( BoundarySweepSD), dim3(blocks_p),dim3(threads_p), 0, 0, forcenew.devicePtr,density.devicePtr,position.devicePtr,mass.devicePtr,\ d,numparticles,velocity.devicePtr,re,xmax,stiffness,damping); hipLaunchKernelGGL(( velocityUpdate), dim3(blocks_p),dim3(threads_p), 0, 0, forceold.devicePtr,forcenew.devicePtr,mass.devicePtr, velocity.devicePtr,numparticles,timestep_length); iter++; } } }
02647803269d859b1382abb1d28138d200ddcdd9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <cuda.h> #include <iostream> #include <random> // User Defined Header Files #include "cudaDeviceBuffer.h" #include "Parser.h" #include "kernels.cuh" #include "VTKWriter.h" // Constants # define pi 3.14159265358979323846 // User Defined Functions real_t RestDistance(const u_int nump,const real_t volumne) { real_t n = nump / volumne ; real_t a = std::cbrt(3.0 / (4 * pi * n )) ; return 0.893 * a ; } void MaxWellBoltzmannVel(cudaDeviceBuffer<real_t> &vel){ u_int num_particles = vel.size()/ 3 ; real_t a1,a2,a3,r,s; for (u_int p = 0; p < num_particles; ++p) { u_int pindex = p * 3 ; do { a1 = 2.0 * std::rand() / ((double)RAND_MAX + 1.0 ) - 1.0 ; a2 = 2.0 * std::rand() / ((double)RAND_MAX + 1.0 ) - 1.0 ; a3 = 2.0 * std::rand() / ((double)RAND_MAX + 1.0 ) - 1.0 ; r = (a1 * a1) + (a2 * a2) + (a3 * a3) ; } while (r >= 1.0); s = std::sqrt(-2.0 * log(r)/r) ; vel[pindex] = a1 *s ; vel[pindex+1] = a2 *s ; vel[pindex+2] = a3 *s ; } } int main(int argc, char **argv){ // Read from the file. Parser p(argv[1]); p.readParameters(); p.readInputConfiguration(); // Simulation Parameters real_t time_end = std::stod(p.params["time_end"]) ; real_t timestep_length = std::stod(p.params["timestep_length"]) ; u_int vtk_out_freq = std::stol(p.params["vtk_out_freq"]) ; u_int threads_per_blocks = std::stol(p.params["cl_workgroup_1dsize"]) ; std::string vtk_name = p.params["vtk_out_name_base"] ; real_t xmin = std::stod(p.params["x_min"]); // We will assume cubic domain real_t xmax = std::stod(p.params["x_max"]); real_t ymin = std::stod(p.params["y_min"]); real_t ymax = std::stod(p.params["y_max"]); real_t zmin = std::stod(p.params["z_min"]); real_t zmax = std::stod(p.params["z_max"]); real_t re = std::stod(p.params["re"]) ; real_t nu = std::stod(p.params["nu"]); real_t k = std::stod(p.params["k"]); real_t restpressure = std::stod(p.params["p0"]); real_t restdensity = std::stod(p.params["rho"]); real_t stiffness = std::stod(p.params["stiffness"]); real_t damping = std::stod(p.params["damping"]); real_t d = std::stod(p.params["d"]); real_t sigma = std::stod(p.params["sigma"]); // Computing the Cell length const real_t celllength = re ; const u_int numcellx = (xmax - xmin) / celllength ; // Number of particles const u_int numparticles = p.num_particles ; printf("Rest distance is %f\n",d); // Number of Cell const u_int numcells = numcellx * numcellx * numcellx ; // Buffers cudaDeviceBuffer<real_t> mass(numparticles,"Scalar") ; cudaDeviceBuffer<real_t> position(numparticles,"Vector") ; cudaDeviceBuffer<real_t> density(numparticles,"Scalar") ; cudaDeviceBuffer<real_t> pressure(numparticles,"Scalar") ; cudaDeviceBuffer<real_t> velocity(numparticles,"Vector") ; cudaDeviceBuffer<real_t> forceold(numparticles,"Vector") ; cudaDeviceBuffer<real_t> forcenew(numparticles,"Vector") ; cudaDeviceBuffer<int> cell_list(numcells,"Scalar"); cudaDeviceBuffer<int> particle_list(numparticles,"Scalar"); cudaDeviceBuffer<real_t> const_args(9,"Scalar"); cudaDeviceBuffer<u_int> num_cells(3,"Scalar"); // Fill the buffers from Initial list p.fillBuffers(mass,velocity,position) ; // Velocity according to Maxwell Boltzmann distribution //Filling in the host data for the constant arguments const_args[0] = xmin; const_args[1] = xmax; const_args[2] = ymin; const_args[3] = ymax; const_args[4] = zmin; const_args[5] = zmax; const_args[6] = celllength; const_args[7] = celllength; const_args[8] = celllength; //Number of cells per dimension assuming cubic domain 
num_cells[0] = numcellx; num_cells[1] = numcellx; num_cells[2] = numcellx; // Allocating memory on Device mass.allocateOnDevice(); position.allocateOnDevice(); velocity.allocateOnDevice(); pressure.allocateOnDevice() ; density.allocateOnDevice() ; forceold.allocateOnDevice(); forcenew.allocateOnDevice(); cell_list.allocateOnDevice(); particle_list.allocateOnDevice(); const_args.allocateOnDevice(); num_cells.allocateOnDevice(); //Copy to Device mass.copyToDevice(); position.copyToDevice(); velocity.copyToDevice(); pressure.copyToDevice(); density.copyToDevice() ; forceold.copyToDevice(); forcenew.copyToDevice(); cell_list.copyToDevice(); particle_list.copyToDevice(); const_args.copyToDevice(); num_cells.copyToDevice(); // Calculate the number of blocks to launch u_int blocks_p,blocks_c,threads_p,threads_c; threads_p = threads_per_blocks; threads_c = threads_per_blocks; if(numparticles%threads_per_blocks == 0){ blocks_p = numparticles/threads_p; } else{ blocks_p = numparticles/threads_p+1; } if(numcells%threads_per_blocks){ blocks_c = numcells/threads_c; } else{ blocks_c = numparticles/threads_c+1; } VTKWriter writer(vtk_name); { InitializeCellList<<<blocks_c,threads_c>>>(cell_list.devicePtr,numcells) ; InitializePartList<<<blocks_p,threads_p>>>(particle_list.devicePtr,numparticles) ; UpdateList<<<blocks_p,threads_p>>>(cell_list.devicePtr,particle_list.devicePtr, position.devicePtr,celllength,numparticles,numcellx) ; CalculateDensity<<<blocks_p,threads_p >>>(mass.devicePtr,cell_list.devicePtr,particle_list.devicePtr, density.devicePtr,position.devicePtr,re,numparticles,celllength,numcellx) ; CalculatePressure<<<blocks_p,threads_p>>>(pressure.devicePtr,density.devicePtr, restpressure,restdensity,k,numparticles) ; CalculateForce<<<blocks_p,threads_p>>>(velocity.devicePtr,forcenew.devicePtr,cell_list.devicePtr, particle_list.devicePtr,mass.devicePtr,pressure.devicePtr,density.devicePtr, position.devicePtr,numparticles,celllength,numcellx,re,nu,sigma) ; //BoundarySweep<<<blocks_p,threads_p>>>(forcenew.devicePtr,density.devicePtr,mass.devicePtr,timestep_length,position.devicePtr,d,numparticles,re,const_args[1],1); BoundarySweepSD<<<blocks_p,threads_p>>>(forcenew.devicePtr,density.devicePtr,position.devicePtr,mass.devicePtr,\ d,numparticles,velocity.devicePtr,re,xmax,stiffness,damping); int iter=0; for (real_t t = 0.0;t<=time_end; t+= timestep_length) { if(iter % vtk_out_freq == 0){ // copy to host back forcenew.copyToHost(); forceold.copyToHost(); position.copyToHost(); velocity.copyToHost(); writer.writeVTKOutput(mass,position,velocity,numparticles); } positionUpdate<<<blocks_p,threads_p>>>(forcenew.devicePtr,position.devicePtr,velocity.devicePtr, mass.devicePtr,numparticles,timestep_length) ; copyForces<<<blocks_p,threads_p>>>(forceold.devicePtr,forcenew.devicePtr,numparticles); InitializeCellList<<<blocks_c,threads_c>>>(cell_list.devicePtr,numcells) ; InitializePartList<<<blocks_p,threads_p>>>(particle_list.devicePtr,numparticles) ; UpdateList<<<blocks_p,threads_p>>>(cell_list.devicePtr,particle_list.devicePtr, position.devicePtr,celllength,numparticles,numcellx) ; CalculateDensity<<<blocks_p,threads_p >>>(mass.devicePtr,cell_list.devicePtr,particle_list.devicePtr, density.devicePtr,position.devicePtr,re,numparticles,celllength,numcellx) ; CalculatePressure<<<blocks_p,threads_p>>>(pressure.devicePtr,density.devicePtr, restpressure,restdensity,k,numparticles) ; CalculateForce<<<blocks_p,threads_p>>>(velocity.devicePtr,forcenew.devicePtr,cell_list.devicePtr, 
particle_list.devicePtr,mass.devicePtr,pressure.devicePtr, density.devicePtr,position.devicePtr,numparticles,celllength,numcellx,re,nu,sigma) ; // BoundarySweep<<<blocks_p,threads_p>>> (forcenew.devicePtr,density.devicePtr,mass.devicePtr,timestep_length, // position.devicePtr,d,numparticles,re,const_args[0],1); BoundarySweepSD<<<blocks_p,threads_p>>>(forcenew.devicePtr,density.devicePtr,position.devicePtr,mass.devicePtr,\ d,numparticles,velocity.devicePtr,re,xmax,stiffness,damping); velocityUpdate<<<blocks_p,threads_p>>>(forceold.devicePtr,forcenew.devicePtr,mass.devicePtr, velocity.devicePtr,numparticles,timestep_length); iter++; } } }
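// A minimal sketch of the ceil-division grid sizing that the one-thread-per-particle
// and one-thread-per-cell launches above assume; div_up is a hypothetical helper,
// not part of these sources. Note that in main() the cell grid only takes the "+1"
// branch when numcells is an exact multiple of the block size (and then divides
// numparticles), so the conventional round-up below is what the launch geometry
// effectively needs for both grids.
static inline unsigned int div_up(unsigned int n, unsigned int threads_per_block)
{
    return (n + threads_per_block - 1u) / threads_per_block;  // ceil(n / threads_per_block)
}
// e.g. blocks_p = div_up(numparticles, threads_p); blocks_c = div_up(numcells, threads_c);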
535d96d398433345aa2ded6eaf5976b10cfa5092.hip
// !!! This is a file automatically generated by hipify!!! #include "rc4_parallel.cuh" #include "rc4_common.h" #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <hip/hip_runtime.h> void print_buffer_in_hex(u_char* buf, int size) { int i; for (i=0; i<size; i++) { printf("0x%x ", buf[i] ); } printf("\n"); } void encrypt_chunk(u_char* host_databuf, u_char* host_keybuf, u_char* cuda_databuf, u_char* cuda_keybuf, int length) { hipMemcpy(cuda_databuf, host_databuf, length, hipMemcpyHostToDevice); hipMemcpy(cuda_keybuf, host_keybuf, length, hipMemcpyHostToDevice); int threads = 256; int blocks = (length / 256); if (blocks == 0) { blocks = 1; } hipLaunchKernelGGL(( rc4_crypt_kernel), dim3(blocks), dim3(threads), 0, 0, cuda_databuf, cuda_keybuf, length); fprintf(stderr, "Kernel launch error? %s\n", hipGetErrorString(hipGetLastError())); if (host_databuf[length] != '\0') { fprintf(stderr, "host_databuf isn't null terminated\n"); } // Recover encrypted material hipMemcpy(host_databuf, cuda_databuf, length, hipMemcpyDeviceToHost); fwrite(host_databuf, sizeof(u_char), length, stdout); } void encrypt_stdin_buffered_parallel(int bufsize, rc4_state* s) { u_char* buffer = (u_char*) malloc(sizeof(u_char) * (bufsize) ); // Null-terminate the buffer for printing; we never write to the null-byte u_char* keybuffer = (u_char*) malloc(sizeof(u_char) * bufsize); u_char* cuda_keybuffer; u_char* cuda_databuffer; // u_char* output = (u_char*) malloc(sizeof(u_char) * bufsize); hipMalloc(&cuda_keybuffer, bufsize); hipMalloc(&cuda_databuffer, bufsize); size_t sz = sizeof(u_char); int i = 0; while (!feof(stdin)){ while (i < bufsize && !feof(stdin)) { int j = fread( &buffer[i], sz, (bufsize - i), stdin); i += j; } get_n_bytes_of_key(s, keybuffer, i); encrypt_chunk(buffer, keybuffer, cuda_databuffer, cuda_keybuffer, i); i = 0; } //printf("Reached EOF\n"); free(buffer); free(keybuffer); } rc4_state_t* setup_state_with_key(u_char* key, int keylen) { rc4_state_t* s = (rc4_state_t*) malloc(sizeof(rc4_state_t)); rc4_initialize(s, key, keylen); return s; } int main(int argc, char *argv[]) { if (argc == 1) { printf("Must specify key as arg 1\n"); exit(255); } //printf("Key is %s\n", argv[1]); int keylen = strlen(argv[1]); //printf("Keylen is %d\n", keylen); int buffersz = 512; //size shipped to gpu if (argc == 3) { buffersz = atoi(argv[2]); } u_char* key = (u_char*) malloc(keylen); memcpy(key, argv[1], keylen); rc4_state_t* state = setup_state_with_key(key, keylen); encrypt_stdin_buffered_parallel(buffersz, state); }
535d96d398433345aa2ded6eaf5976b10cfa5092.cu
#include "rc4_parallel.cuh" #include "rc4_common.h" #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <cuda.h> void print_buffer_in_hex(u_char* buf, int size) { int i; for (i=0; i<size; i++) { printf("0x%x ", buf[i] ); } printf("\n"); } void encrypt_chunk(u_char* host_databuf, u_char* host_keybuf, u_char* cuda_databuf, u_char* cuda_keybuf, int length) { cudaMemcpy(cuda_databuf, host_databuf, length, cudaMemcpyHostToDevice); cudaMemcpy(cuda_keybuf, host_keybuf, length, cudaMemcpyHostToDevice); int threads = 256; int blocks = (length / 256); if (blocks == 0) { blocks = 1; } rc4_crypt_kernel<<<blocks, threads>>>(cuda_databuf, cuda_keybuf, length); fprintf(stderr, "Kernel launch error? %s\n", cudaGetErrorString(cudaGetLastError())); if (host_databuf[length] != '\0') { fprintf(stderr, "host_databuf isn't null terminated\n"); } // Recover encrypted material cudaMemcpy(host_databuf, cuda_databuf, length, cudaMemcpyDeviceToHost); fwrite(host_databuf, sizeof(u_char), length, stdout); } void encrypt_stdin_buffered_parallel(int bufsize, rc4_state* s) { u_char* buffer = (u_char*) malloc(sizeof(u_char) * (bufsize) ); // Null-terminate the buffer for printing; we never write to the null-byte u_char* keybuffer = (u_char*) malloc(sizeof(u_char) * bufsize); u_char* cuda_keybuffer; u_char* cuda_databuffer; // u_char* output = (u_char*) malloc(sizeof(u_char) * bufsize); cudaMalloc(&cuda_keybuffer, bufsize); cudaMalloc(&cuda_databuffer, bufsize); size_t sz = sizeof(u_char); int i = 0; while (!feof(stdin)){ while (i < bufsize && !feof(stdin)) { int j = fread( &buffer[i], sz, (bufsize - i), stdin); i += j; } get_n_bytes_of_key(s, keybuffer, i); encrypt_chunk(buffer, keybuffer, cuda_databuffer, cuda_keybuffer, i); i = 0; } //printf("Reached EOF\n"); free(buffer); free(keybuffer); } rc4_state_t* setup_state_with_key(u_char* key, int keylen) { rc4_state_t* s = (rc4_state_t*) malloc(sizeof(rc4_state_t)); rc4_initialize(s, key, keylen); return s; } int main(int argc, char *argv[]) { if (argc == 1) { printf("Must specify key as arg 1\n"); exit(255); } //printf("Key is %s\n", argv[1]); int keylen = strlen(argv[1]); //printf("Keylen is %d\n", keylen); int buffersz = 512; //size shipped to gpu if (argc == 3) { buffersz = atoi(argv[2]); } u_char* key = (u_char*) malloc(keylen); memcpy(key, argv[1], keylen); rc4_state_t* state = setup_state_with_key(key, keylen); encrypt_stdin_buffered_parallel(buffersz, state); }
862e000ffcdc60c06157507401ddf06f6692574c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include "../../src/exact/argmax_by_key.cuh" #include "../../src/exact/gradients.cuh" #include "../../src/exact/node.cuh" #include "../../src/exact/loss_functions.cuh" #include "utils.cuh" namespace xgboost { namespace tree { namespace exact { TEST(ArgMaxByKey, maxSplit) { Split a, b, out; a.score = 2.f; a.index = 3; b.score = 3.f; b.index = 4; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, b.score); EXPECT_EQ(out.index, b.index); b.score = 2.f; b.index = 4; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, a.score); EXPECT_EQ(out.index, a.index); b.score = 2.f; b.index = 2; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, a.score); EXPECT_EQ(out.index, b.index); b.score = 1.f; b.index = 1; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, a.score); EXPECT_EQ(out.index, a.index); } template <typename node_id_t> void argMaxTest(ArgMaxByKeyAlgo algo) { const int nVals = 1024; const int level = 0; const int nKeys = 1 << level; gpu_gpair* scans = new gpu_gpair[nVals]; float* vals = new float[nVals]; int* colIds = new int[nVals]; scans[0] = gpu_gpair(); vals[0] = 0.f; colIds[0] = 0; for (int i = 1; i < nVals; ++i) { scans[i].g = scans[i-1].g + (0.1f * 2.f); scans[i].h = scans[i-1].h + (0.1f * 2.f); vals[i] = static_cast<float>(i) * 0.1f; colIds[i] = 0; } float* dVals; allocateAndUpdateOnGpu<float>(dVals, vals, nVals); gpu_gpair* dScans; allocateAndUpdateOnGpu<gpu_gpair>(dScans, scans, nVals); gpu_gpair* sums = new gpu_gpair[nKeys]; sums[0].g = sums[0].h = (0.1f * 2.f * nVals); gpu_gpair* dSums; allocateAndUpdateOnGpu<gpu_gpair>(dSums, sums, nKeys); int* dColIds; allocateAndUpdateOnGpu<int>(dColIds, colIds, nVals); Split* splits = new Split[nKeys]; Split* dSplits; allocateOnGpu<Split>(dSplits, nKeys); node_id_t* nodeAssigns = new node_id_t[nVals]; memset(nodeAssigns, 0, sizeof(node_id_t)*nVals); node_id_t* dNodeAssigns; allocateAndUpdateOnGpu<node_id_t>(dNodeAssigns, nodeAssigns, nVals); Node<node_id_t>* nodes = new Node<node_id_t>[nKeys]; nodes[0].gradSum = sums[0]; nodes[0].id = 0; TrainParam param; param.min_child_weight = 0.0f; param.reg_alpha = 0.f; param.reg_lambda = 2.f; param.max_delta_step = 0.f; nodes[0].score = CalcGain(param, sums[0].g, sums[0].h); Node<node_id_t>* dNodes; allocateAndUpdateOnGpu<Node<node_id_t> >(dNodes, nodes, nKeys); argMaxByKey<node_id_t>(dSplits, dScans, dSums, dVals, dColIds, dNodeAssigns, dNodes, nKeys, 0, nVals, param, algo); updateHostPtr<Split>(splits, dSplits, nKeys); EXPECT_FLOAT_EQ(0.f, splits->score); EXPECT_EQ(0, splits->index); dh::safe_cuda(hipFree(dNodeAssigns)); delete [] nodeAssigns; dh::safe_cuda(hipFree(dSplits)); delete [] splits; dh::safe_cuda(hipFree(dColIds)); delete [] colIds; dh::safe_cuda(hipFree(dSums)); delete [] sums; dh::safe_cuda(hipFree(dVals)); delete [] vals; dh::safe_cuda(hipFree(dScans)); delete [] scans; } TEST(ArgMaxByKey, testOneColGmem) { 
argMaxTest<int16_t>(ABK_GMEM); } TEST(ArgMaxByKey, testOneColSmem) { argMaxTest<int16_t>(ABK_SMEM); } } // namespace exact } // namespace tree } // namespace xgboost
862e000ffcdc60c06157507401ddf06f6692574c.cu
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include "../../src/exact/argmax_by_key.cuh" #include "../../src/exact/gradients.cuh" #include "../../src/exact/node.cuh" #include "../../src/exact/loss_functions.cuh" #include "utils.cuh" namespace xgboost { namespace tree { namespace exact { TEST(ArgMaxByKey, maxSplit) { Split a, b, out; a.score = 2.f; a.index = 3; b.score = 3.f; b.index = 4; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, b.score); EXPECT_EQ(out.index, b.index); b.score = 2.f; b.index = 4; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, a.score); EXPECT_EQ(out.index, a.index); b.score = 2.f; b.index = 2; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, a.score); EXPECT_EQ(out.index, b.index); b.score = 1.f; b.index = 1; out = maxSplit(a, b); EXPECT_FLOAT_EQ(out.score, a.score); EXPECT_EQ(out.index, a.index); } template <typename node_id_t> void argMaxTest(ArgMaxByKeyAlgo algo) { const int nVals = 1024; const int level = 0; const int nKeys = 1 << level; gpu_gpair* scans = new gpu_gpair[nVals]; float* vals = new float[nVals]; int* colIds = new int[nVals]; scans[0] = gpu_gpair(); vals[0] = 0.f; colIds[0] = 0; for (int i = 1; i < nVals; ++i) { scans[i].g = scans[i-1].g + (0.1f * 2.f); scans[i].h = scans[i-1].h + (0.1f * 2.f); vals[i] = static_cast<float>(i) * 0.1f; colIds[i] = 0; } float* dVals; allocateAndUpdateOnGpu<float>(dVals, vals, nVals); gpu_gpair* dScans; allocateAndUpdateOnGpu<gpu_gpair>(dScans, scans, nVals); gpu_gpair* sums = new gpu_gpair[nKeys]; sums[0].g = sums[0].h = (0.1f * 2.f * nVals); gpu_gpair* dSums; allocateAndUpdateOnGpu<gpu_gpair>(dSums, sums, nKeys); int* dColIds; allocateAndUpdateOnGpu<int>(dColIds, colIds, nVals); Split* splits = new Split[nKeys]; Split* dSplits; allocateOnGpu<Split>(dSplits, nKeys); node_id_t* nodeAssigns = new node_id_t[nVals]; memset(nodeAssigns, 0, sizeof(node_id_t)*nVals); node_id_t* dNodeAssigns; allocateAndUpdateOnGpu<node_id_t>(dNodeAssigns, nodeAssigns, nVals); Node<node_id_t>* nodes = new Node<node_id_t>[nKeys]; nodes[0].gradSum = sums[0]; nodes[0].id = 0; TrainParam param; param.min_child_weight = 0.0f; param.reg_alpha = 0.f; param.reg_lambda = 2.f; param.max_delta_step = 0.f; nodes[0].score = CalcGain(param, sums[0].g, sums[0].h); Node<node_id_t>* dNodes; allocateAndUpdateOnGpu<Node<node_id_t> >(dNodes, nodes, nKeys); argMaxByKey<node_id_t>(dSplits, dScans, dSums, dVals, dColIds, dNodeAssigns, dNodes, nKeys, 0, nVals, param, algo); updateHostPtr<Split>(splits, dSplits, nKeys); EXPECT_FLOAT_EQ(0.f, splits->score); EXPECT_EQ(0, splits->index); dh::safe_cuda(cudaFree(dNodeAssigns)); delete [] nodeAssigns; dh::safe_cuda(cudaFree(dSplits)); delete [] splits; dh::safe_cuda(cudaFree(dColIds)); delete [] colIds; dh::safe_cuda(cudaFree(dSums)); delete [] sums; dh::safe_cuda(cudaFree(dVals)); delete [] vals; dh::safe_cuda(cudaFree(dScans)); delete [] scans; } TEST(ArgMaxByKey, testOneColGmem) { argMaxTest<int16_t>(ABK_GMEM); } TEST(ArgMaxByKey, 
testOneColSmem) { argMaxTest<int16_t>(ABK_SMEM); } } // namespace exact } // namespace tree } // namespace xgboost
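// A short sketch of why the test above can expect a best score of 0, assuming the
// usual xgboost split gain (split_gain below is a hypothetical helper; CalcGain's
// exact definition lives in loss_functions.cuh, which is not shown). Every element
// contributes the same (g, h) = (0.2, 0.2), and x -> x^2 / (x + lambda) is
// superadditive, so no left/right partition scores better than the unsplit parent
// and the Split presumably keeps its initial zero score and index.
static float split_gain(float gl, float hl, float gr, float hr, float lambda)
{
    const float parent = (gl + gr) * (gl + gr) / (hl + hr + lambda);
    return gl * gl / (hl + lambda) + gr * gr / (hr + lambda) - parent;  // <= 0 for uniform gradients
}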
4e7268151c616e4c8986e2dfa9a986f19fe9b83b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defines.cu"

/// Strict ReLU back propagation
/// @brief Updates backpropagated error by activation derivative.
/// @details err_y *= (y > 0) ? 1 : 0
extern "C"
__global__ void err_y_update(dtype *err_y, const dtype *y) {
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if ((idx < ERR_OUTPUT_SIZE) && (y[idx] <= 0)) {
    err_y[idx] = 0;
  }
}
4e7268151c616e4c8986e2dfa9a986f19fe9b83b.cu
#include "defines.cu" /// Strict ReLU back propagation /// @brief Updates backpropagated error by activation derivative. /// @details err_y *= (y > 0) ? 1 : 0 extern "C" __global__ void err_y_update(dtype *err_y, const dtype *y) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if ((idx < ERR_OUTPUT_SIZE) && (y[idx] <= 0)) { err_y[idx] = 0; } }
c8f68ddce39cf195cd30fbbd25c88179807aa047.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "BriskScaleSpace.cuh" __global__ void refineKernel1(BriskScaleSpace space, float2* keypoints, float* kpSize, float* kpScore, const int threshold_, int whichLayer) { const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x; const short2& point = space.kpsLoc[whichLayer][kpIdx]; // first check if it is a maximum: // //todo : seems not necessary? if (!space.isMax2D(space.pyramid_, 0, (int) point.x, (int) point.y)) return; // let's do the subpixel and float scale refinement: BriskLayerOne& l = space.pyramid_[0]; int s_0_0 = l.getAgastScore(point.x - 1, point.y - 1, 1); int s_1_0 = l.getAgastScore(point.x, point.y - 1, 1); int s_2_0 = l.getAgastScore(point.x + 1, point.y - 1, 1); int s_2_1 = l.getAgastScore(point.x + 1, point.y, 1); int s_1_1 = l.getAgastScore(point.x, point.y, 1); int s_0_1 = l.getAgastScore(point.x - 1, point.y, 1); int s_0_2 = l.getAgastScore(point.x - 1, point.y + 1, 1); int s_1_2 = l.getAgastScore(point.x, point.y + 1, 1); int s_2_2 = l.getAgastScore(point.x + 1, point.y + 1, 1); float delta_x, delta_y; float max = space.subpixel2D(s_0_0, s_0_1, s_0_2, s_1_0, s_1_1, s_1_2, s_2_0, s_2_1, s_2_2, delta_x, delta_y); // store: const unsigned int ind = atomicInc(&g_counter1, (unsigned int) (-1)); keypoints[ind] = make_float2(float(point.x) + delta_x, float(point.y) + delta_y); kpSize[ind] = space.basicSize_; kpScore[ind] = max; //keypoints.push_back(cv::KeyPoint(float(point.x) + delta_x, float(point.y) + delta_y, basicSize_, -1, max, 0)); } __global__ void refineKernel2(BriskScaleSpace space, float2* keypoints, float* kpSize, float* kpScore,const int threshold_) { int safeThreshold_ = (int)(threshold_ * space.safetyFactor_); int i = blockIdx.x; float x, y, scale, score; const int n = threadIdx.x + blockIdx.y * blockDim.x; // may cause problem if (n >= space.kpsCount[i]) { return; } else { BriskLayerOne& l = space.pyramid_[i]; if (i == space.layers_ - 1) { //for (size_t n = 0; n < space.c; n++) // { const short2& point = space.kpsLoc[i][n]; // consider only 2D maxima... 
if (!space.isMax2D(space.pyramid_, i, (int) point.x, (int) point.y)) return; bool ismax; float dx, dy; space.getScoreMaxBelow(space.pyramid_, i, (int) point.x, (int) point.y, l.getAgastScore(point.x, point.y, safeThreshold_), ismax, dx, dy); if (!ismax) return; // get the patch on this layer: int s_0_0 = l.getAgastScore(point.x - 1, point.y - 1, 1); int s_1_0 = l.getAgastScore(point.x, point.y - 1, 1); int s_2_0 = l.getAgastScore(point.x + 1, point.y - 1, 1); int s_2_1 = l.getAgastScore(point.x + 1, point.y, 1); int s_1_1 = l.getAgastScore(point.x, point.y, 1); int s_0_1 = l.getAgastScore(point.x - 1, point.y, 1); int s_0_2 = l.getAgastScore(point.x - 1, point.y + 1, 1); int s_1_2 = l.getAgastScore(point.x, point.y + 1, 1); int s_2_2 = l.getAgastScore(point.x + 1, point.y + 1, 1); float delta_x, delta_y; float max = space.subpixel2D(s_0_0, s_0_1, s_0_2, s_1_0, s_1_1, s_1_2, s_2_0, s_2_1, s_2_2, delta_x, delta_y); const unsigned int ind = atomicInc(&g_counter1, (unsigned int) (-1)); keypoints[ind] = make_float2( (float(point.x) + delta_x) * l.scale() + l.offset(),//todo: find the meaning of offset (float(point.y) + delta_y) * l.scale() + l.offset()); kpSize[ind] = space.basicSize_ * l.scale(); kpScore[ind] = max; /* // store: keypoints.push_back( cv::KeyPoint((float(point.x) + delta_x) * l.scale() + l.offset(), (float(point.y) + delta_y) * l.scale() + l.offset(), basicSize_ * l.scale(), -1, max, i));*/ // } } else { // not the last layer: //for (size_t n = 0; n < num; n++) //{ const short2& point = space.kpsLoc[i][n]; // first check if it is a maximum: if (!space.isMax2D(space.pyramid_, i, (int) point.x, (int) point.y)) return; // let's do the subpixel and float scale refinement: bool ismax = false; //refine3D score = space.refine3D(space.pyramid_, i, (int) point.x, (int) point.y, x, y, scale, ismax); if (!ismax) { return; } //basicSize // finally store the detected keypoint: if (score > float(threshold_)) { const unsigned int ind = atomicInc(&g_counter1, (unsigned int) (-1)); keypoints[ind] = make_float2(x, y); kpSize[ind] = space.basicSize_ * scale; kpScore[ind] = score; //keypoints.push_back(cv::KeyPoint(x, y, basicSize_ * scale, -1, score, i)); } } } } //for (int i = 0; i < layers_; i++) //{ // //const size_t num = agastPoints[i].size(); // }
c8f68ddce39cf195cd30fbbd25c88179807aa047.cu
#include "BriskScaleSpace.cuh" __global__ void refineKernel1(BriskScaleSpace space, float2* keypoints, float* kpSize, float* kpScore, const int threshold_, int whichLayer) { const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x; const short2& point = space.kpsLoc[whichLayer][kpIdx]; // first check if it is a maximum: //非极大值抑制 //todo : seems not necessary? if (!space.isMax2D(space.pyramid_, 0, (int) point.x, (int) point.y)) return; // let's do the subpixel and float scale refinement: BriskLayerOne& l = space.pyramid_[0]; int s_0_0 = l.getAgastScore(point.x - 1, point.y - 1, 1); int s_1_0 = l.getAgastScore(point.x, point.y - 1, 1); int s_2_0 = l.getAgastScore(point.x + 1, point.y - 1, 1); int s_2_1 = l.getAgastScore(point.x + 1, point.y, 1); int s_1_1 = l.getAgastScore(point.x, point.y, 1); int s_0_1 = l.getAgastScore(point.x - 1, point.y, 1); int s_0_2 = l.getAgastScore(point.x - 1, point.y + 1, 1); int s_1_2 = l.getAgastScore(point.x, point.y + 1, 1); int s_2_2 = l.getAgastScore(point.x + 1, point.y + 1, 1); float delta_x, delta_y; float max = space.subpixel2D(s_0_0, s_0_1, s_0_2, s_1_0, s_1_1, s_1_2, s_2_0, s_2_1, s_2_2, delta_x, delta_y); // store: const unsigned int ind = atomicInc(&g_counter1, (unsigned int) (-1)); keypoints[ind] = make_float2(float(point.x) + delta_x, float(point.y) + delta_y); kpSize[ind] = space.basicSize_; kpScore[ind] = max; //keypoints.push_back(cv::KeyPoint(float(point.x) + delta_x, float(point.y) + delta_y, basicSize_, -1, max, 0)); } __global__ void refineKernel2(BriskScaleSpace space, float2* keypoints, float* kpSize, float* kpScore,const int threshold_) { int safeThreshold_ = (int)(threshold_ * space.safetyFactor_); int i = blockIdx.x; float x, y, scale, score; const int n = threadIdx.x + blockIdx.y * blockDim.x; // may cause problem if (n >= space.kpsCount[i]) { return; } else { BriskLayerOne& l = space.pyramid_[i]; if (i == space.layers_ - 1) { //for (size_t n = 0; n < space.c; n++) // { const short2& point = space.kpsLoc[i][n]; // consider only 2D maxima... 
if (!space.isMax2D(space.pyramid_, i, (int) point.x, (int) point.y)) return; bool ismax; float dx, dy; space.getScoreMaxBelow(space.pyramid_, i, (int) point.x, (int) point.y, l.getAgastScore(point.x, point.y, safeThreshold_), ismax, dx, dy); if (!ismax) return; // get the patch on this layer: int s_0_0 = l.getAgastScore(point.x - 1, point.y - 1, 1); int s_1_0 = l.getAgastScore(point.x, point.y - 1, 1); int s_2_0 = l.getAgastScore(point.x + 1, point.y - 1, 1); int s_2_1 = l.getAgastScore(point.x + 1, point.y, 1); int s_1_1 = l.getAgastScore(point.x, point.y, 1); int s_0_1 = l.getAgastScore(point.x - 1, point.y, 1); int s_0_2 = l.getAgastScore(point.x - 1, point.y + 1, 1); int s_1_2 = l.getAgastScore(point.x, point.y + 1, 1); int s_2_2 = l.getAgastScore(point.x + 1, point.y + 1, 1); float delta_x, delta_y; float max = space.subpixel2D(s_0_0, s_0_1, s_0_2, s_1_0, s_1_1, s_1_2, s_2_0, s_2_1, s_2_2, delta_x, delta_y); const unsigned int ind = atomicInc(&g_counter1, (unsigned int) (-1)); keypoints[ind] = make_float2( (float(point.x) + delta_x) * l.scale() + l.offset(),//todo: find the meaning of offset (float(point.y) + delta_y) * l.scale() + l.offset()); kpSize[ind] = space.basicSize_ * l.scale(); kpScore[ind] = max; /* // store: keypoints.push_back( cv::KeyPoint((float(point.x) + delta_x) * l.scale() + l.offset(), (float(point.y) + delta_y) * l.scale() + l.offset(), basicSize_ * l.scale(), -1, max, i));*/ // } } else { // not the last layer: //for (size_t n = 0; n < num; n++) //{ const short2& point = space.kpsLoc[i][n]; // first check if it is a maximum: if (!space.isMax2D(space.pyramid_, i, (int) point.x, (int) point.y)) return; // let's do the subpixel and float scale refinement: bool ismax = false; //可见refine3D是真正判断是否最大的货色 score = space.refine3D(space.pyramid_, i, (int) point.x, (int) point.y, x, y, scale, ismax); if (!ismax) { return; } //理解这个basicSize的真实含义 // finally store the detected keypoint: if (score > float(threshold_)) { const unsigned int ind = atomicInc(&g_counter1, (unsigned int) (-1)); keypoints[ind] = make_float2(x, y); kpSize[ind] = space.basicSize_ * scale; kpScore[ind] = score; //keypoints.push_back(cv::KeyPoint(x, y, basicSize_ * scale, -1, score, i)); } } } } //for (int i = 0; i < layers_; i++) //{ // //const size_t num = agastPoints[i].size(); // }
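// subpixel2D is implemented in BriskScaleSpace.cuh and is not shown in this pair.
// As a rough stand-in for what such a refinement does with the 3x3 score patch
// gathered above (s_0_0 .. s_2_2), the sketch below takes one Newton step per axis
// from central differences; BRISK's actual routine fits a full 2-D quadratic, so
// treat this only as a simplified illustration with hypothetical names.
static __host__ __device__ void subpixel_newton_sketch(
    float s_0_1, float s_1_0, float s_1_1, float s_1_2, float s_2_1,
    float& delta_x, float& delta_y)
{
    const float dx  = 0.5f * (s_2_1 - s_0_1);        // first derivative along x
    const float dy  = 0.5f * (s_1_2 - s_1_0);        // first derivative along y
    const float dxx = s_2_1 - 2.0f * s_1_1 + s_0_1;  // second derivative along x
    const float dyy = s_1_2 - 2.0f * s_1_1 + s_1_0;  // second derivative along y
    delta_x = (dxx != 0.0f) ? -dx / dxx : 0.0f;      // offset of the 1-D parabola peak
    delta_y = (dyy != 0.0f) ? -dy / dyy : 0.0f;
}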
e702093293724fbc6b709de841547d92d81fdd78.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATR.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 16 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); //checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); //checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernal), 
dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
e702093293724fbc6b709de841547d92d81fdd78.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATR.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 16 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); //checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); 
printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
35e495d5cde138ed621a5d5c779d5bf7cde3dc78.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "rgb_to_yuv_convert_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "../rgb_to_yuv_convert_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #define w_r 0.299F #define w_b 0.114F #define w_g (1.0F - w_r - w_b) #define u_max 0.436F #define v_max 0.615F #define u_mult (u_max / (1.0F - w_b)) #define v_mult (v_max / (1.0F - w_r)) #define reverse_r_v_mult ((1.0F - w_r) / v_max) #define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g)) #define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g)) #define reverse_b_u_mult ((1.0F - w_b) / u_max) namespace nnforge { namespace cuda { __global__ void rgb_to_yuv_convert_upd_kernel( const float * __restrict input, float * __restrict output, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float red = input[red_and_y_offset]; float green = input[green_and_u_offset]; float blue = input[blue_and_v_offset]; float y = w_r * red + w_g * green + w_b * blue; float u = u_mult * (blue - y); float v = v_mult * (red - y); output[red_and_y_offset] = y; output[green_and_u_offset] = u; output[blue_and_v_offset] = v; } } __global__ void rgb_to_yuv_convert_deriviative_upd_kernel( float * __restrict errors, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < 
entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float y = errors[red_and_y_offset]; float u = errors[green_and_u_offset]; float v = errors[blue_and_v_offset]; float red = y + reverse_r_v_mult * v; float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v; float blue = y + reverse_b_u_mult * u; errors[red_and_y_offset] = red; errors[green_and_u_offset] = green; errors[blue_and_v_offset] = blue; } } rgb_to_yuv_convert_layer_updater_cuda::rgb_to_yuv_convert_layer_updater_cuda() { } rgb_to_yuv_convert_layer_updater_cuda::~rgb_to_yuv_convert_layer_updater_cuda() { } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { if (offset_input_entry_id > 0) throw neural_network_exception("rgb_to_yuv_convert_layer_updater_cuda is not able to run using offset"); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); hipLaunchKernelGGL(( rgb_to_yuv_convert_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], input_configuration_specific.feature_map_count, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); hipLaunchKernelGGL(( rgb_to_yuv_convert_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_errors_buffer, *schema_data[0], input_configuration_specific.feature_map_count, input_elem_count_per_feature_map, 
color_feature_map_config_count, entry_count); } bool rgb_to_yuv_convert_layer_updater_cuda::is_in_place_backprop() const { return true; } void rgb_to_yuv_convert_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema); color_feature_map_config_count = layer_derived->color_feature_map_config_list.size(); } } }
35e495d5cde138ed621a5d5c779d5bf7cde3dc78.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "rgb_to_yuv_convert_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "../rgb_to_yuv_convert_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #define w_r 0.299F #define w_b 0.114F #define w_g (1.0F - w_r - w_b) #define u_max 0.436F #define v_max 0.615F #define u_mult (u_max / (1.0F - w_b)) #define v_mult (v_max / (1.0F - w_r)) #define reverse_r_v_mult ((1.0F - w_r) / v_max) #define reverse_g_u_mult (-(w_b * (1.0F - w_b)) / (u_max * w_g)) #define reverse_g_v_mult (-(w_r * (1.0F - w_r)) / (v_max * w_g)) #define reverse_b_u_mult ((1.0F - w_b) / u_max) namespace nnforge { namespace cuda { __global__ void rgb_to_yuv_convert_upd_kernel( const float * __restrict input, float * __restrict output, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float red = input[red_and_y_offset]; float green = input[green_and_u_offset]; float blue = input[blue_and_v_offset]; float y = w_r * red + w_g * green + w_b * blue; float u = u_mult * (blue - y); float v = v_mult * (red - y); output[red_and_y_offset] = y; output[green_and_u_offset] = u; output[blue_and_v_offset] = v; } } __global__ void rgb_to_yuv_convert_deriviative_upd_kernel( float * __restrict errors, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = 
color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float y = errors[red_and_y_offset]; float u = errors[green_and_u_offset]; float v = errors[blue_and_v_offset]; float red = y + reverse_r_v_mult * v; float green = y + reverse_g_u_mult * u + reverse_g_v_mult * v; float blue = y + reverse_b_u_mult * u; errors[red_and_y_offset] = red; errors[green_and_u_offset] = green; errors[blue_and_v_offset] = blue; } } rgb_to_yuv_convert_layer_updater_cuda::rgb_to_yuv_convert_layer_updater_cuda() { } rgb_to_yuv_convert_layer_updater_cuda::~rgb_to_yuv_convert_layer_updater_cuda() { } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { if (offset_input_entry_id > 0) throw neural_network_exception("rgb_to_yuv_convert_layer_updater_cuda is not able to run using offset"); std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); rgb_to_yuv_convert_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_neurons_buffer, *output_neurons_buffer, *schema_data[0], input_configuration_specific.feature_map_count, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); } void rgb_to_yuv_convert_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); rgb_to_yuv_convert_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_errors_buffer, *schema_data[0], input_configuration_specific.feature_map_count, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); } bool rgb_to_yuv_convert_layer_updater_cuda::is_in_place_backprop() const 
{ return true; } void rgb_to_yuv_convert_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema); color_feature_map_config_count = layer_derived->color_feature_map_config_list.size(); } } }
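/*
 * Illustrative host-side sketch, separate from the two files above: it checks on the CPU that
 * the reverse_* constants used by rgb_to_yuv_convert_deriviative_upd_kernel form the inverse of
 * the forward RGB->YUV transform applied by rgb_to_yuv_convert_upd_kernel. The coefficient
 * values are the same as the #defines above; the test color, the printout and main() are
 * assumptions made only for this sketch.
 */
#include <cstdio>
#include <cmath>

int main()
{
    const float wr = 0.299F, wb = 0.114F, wg = 1.0F - wr - wb;
    const float umax = 0.436F, vmax = 0.615F;
    const float umult = umax / (1.0F - wb);
    const float vmult = vmax / (1.0F - wr);
    const float rev_r_v = (1.0F - wr) / vmax;
    const float rev_g_u = -(wb * (1.0F - wb)) / (umax * wg);
    const float rev_g_v = -(wr * (1.0F - wr)) / (vmax * wg);
    const float rev_b_u = (1.0F - wb) / umax;

    const float red = 0.25F, green = 0.5F, blue = 0.75F; // arbitrary test color

    // forward transform, as in rgb_to_yuv_convert_upd_kernel
    const float y = wr * red + wg * green + wb * blue;
    const float u = umult * (blue - y);
    const float v = vmult * (red - y);

    // reverse transform, using the same coefficients as the deriviative kernel
    const float red2 = y + rev_r_v * v;
    const float green2 = y + rev_g_u * u + rev_g_v * v;
    const float blue2 = y + rev_b_u * u;

    std::printf("round-trip error: %g %g %g\n",
                std::fabs(red - red2), std::fabs(green - green2), std::fabs(blue - blue2));
    return 0;
}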
b1ee9a87801b7ac86c250be97e727ac5dd3b8c3b.hip
// !!! This is a file automatically generated by hipify!!! /* * * Based on Nvidia convolution separable example. * * * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cstdio> #include <hip/hip_runtime.h> #include <assert.h> #include "../../cucheck.h" #include "wavelet.h" //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[KERNEL_LENGTH*2]; //////////////////////////////////////////////////////////////////////////////// // Row convolution with Low and Hi pass filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 16 #define ROWS_RESULT_STEPS 1 //8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsMirrorHiLoKernel(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int fidx = threadIdx.x % 2; d_Src += baseY * pitch + baseX; const int half = (baseY * pitch + (blockIdx.x * ROWS_BLOCKDIM_X + threadIdx.x))/2; d_Dst += half+(fidx*(imageH*pitch)/2) - ROWS_HALO_STEPS * ROWS_BLOCKDIM_X; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { // If HALO is > 1 maybe d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; is not correct for every ROW_HALO_STEP s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - (threadIdx.x+1)*2]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[fidx * KERNEL_LENGTH + KERNEL_RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } __global__ void invConvolutionRowsMirrorHiLoKernel(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int fidx = threadIdx.x % 2; const int half = (baseY * pitch + (blockIdx.x * ROWS_BLOCKDIM_X + threadIdx.x))/2; d_Src += half+(fidx*(imageH*pitch)/2) - ROWS_HALO_STEPS * ROWS_BLOCKDIM_X; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { // If HALO is > 1 maybe d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; is not correct for every ROW_HALO_STEP s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - (threadIdx.x+1)*2]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[fidx * KERNEL_LENGTH + KERNEL_RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void fwt_1D(float **data, const unsigned level, const unsigned nx, const unsigned ny) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(nx % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(ny % ROWS_BLOCKDIM_Y == 0); const int mem_size = nx*ny*sizeof(float); float *data1, *data2, *aux; data1 = *data; hipMalloc(&data2, mem_size); unsigned w = nx; dim3 blocks(nx / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), ny / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsMirrorHiLoKernel), dim3(blocks), dim3(threads), 0, 0, data2, data1, w, ny, w); CUCHECK(hipGetLastError()); for (unsigned i = 1; i < level; i++) { blocks.x /= 2; w /= 2; aux = data2; data2 = data1; data1 = aux; hipMemcpy(data2+w*ny, data1+w*ny, w*ny*sizeof(float), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( convolutionRowsMirrorHiLoKernel), dim3(blocks), dim3(threads), 0, 0, data2, data1, w, ny, w); CUCHECK(hipGetLastError()); } *data = data2; hipFree(data1); printf("Rows fwt_1D: %s\n",hipGetErrorString(hipGetLastError())); } void iwt_1D(float **data, const unsigned level, const unsigned nx, const unsigned ny) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(nx % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(ny % ROWS_BLOCKDIM_Y == 0); const int mem_size = nx*ny*sizeof(float); float *data1, *data2, *aux; data1 = *data; hipMalloc(&data2, mem_size); unsigned w = nx >> (level-1); dim3 blocks(w / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), ny / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( invConvolutionRowsMirrorHiLoKernel), dim3(blocks), dim3(threads), 0, 0, data2, data1, w, ny, w); CUCHECK(hipGetLastError()); for (unsigned i = 1; i < level; i++) { hipMemcpy(data2+w*ny, data1+w*ny, (nx-w)*ny*sizeof(float), hipMemcpyDeviceToDevice); blocks.x *= 2; w *= 2; aux = data2; data2 = data1; data1 = aux; hipLaunchKernelGGL(( invConvolutionRowsMirrorHiLoKernel), dim3(blocks), dim3(threads), 0, 0, data2, data1, w, ny, w); CUCHECK(hipGetLastError()); } *data = data2; hipFree(data1); printf("Rows iwt_1D: %s\n",hipGetErrorString(hipGetLastError())); } //////////////////////////////////////////////////////////////////////////////// // Transpose //////////////////////////////////////////////////////////////////////////////// const int TILE_DIM = 32; const int BLOCK_ROWS = 8; __global__ void transposeDiagonal(float *odata, const float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } int xIndex = blockIdx_x*TILE_DIM + threadIdx.x; int yIndex = blockIdx_y*TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y*TILE_DIM + threadIdx.x; yIndex = blockIdx_x*TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; 
i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } void transpose(float *tdata, const float *idata, const unsigned nx, const unsigned ny) { dim3 grid(nx/TILE_DIM, ny/TILE_DIM); dim3 threads(TILE_DIM,BLOCK_ROWS); hipLaunchKernelGGL(( transposeDiagonal), dim3(grid), dim3(threads), 0, 0, tdata, idata, nx, ny); CUCHECK(hipDeviceSynchronize()); } extern "C" void setUpFilter(const float *filter){ hipMemcpyToSymbol(c_Kernel, filter, KERNEL_LENGTH*2 * sizeof(float)); printf("Setup: %s\n",hipGetErrorString(hipGetLastError())); } extern "C" void fwt_1D_GPU(float *data, const unsigned level, const unsigned nx, const unsigned ny) { const int mem_size = nx*ny*sizeof(float); float *d_idata; hipMalloc(&d_idata, mem_size); hipMemcpy(d_idata, data, mem_size, hipMemcpyHostToDevice); fwt_1D(&d_idata, level, nx, ny); hipMemcpy(data, d_idata, mem_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("FWT_GPU: %s\n",hipGetErrorString(hipGetLastError())); hipFree(d_idata); } extern "C" void iwt_1D_GPU(float *data, const unsigned level, const unsigned nx, const unsigned ny) { const int mem_size = nx*ny*sizeof(float); float *d_idata; hipMalloc(&d_idata, mem_size); hipMemcpy(d_idata, data, mem_size, hipMemcpyHostToDevice); iwt_1D(&d_idata, level, nx, ny); hipMemcpy(data, d_idata, mem_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("IWT_GPU: %s\n",hipGetErrorString(hipGetLastError())); hipFree(d_idata); } // data_dest: set this to the device address where the output data resides. // If this is not equal to 'data', then 'data' has been freed. extern "C" void wavelet_cuda_3d_fwd(float *data, const unsigned nx, const unsigned ny, const unsigned nz, const unsigned lvlx, const unsigned lvly, const unsigned lvlz, bool data_is_on_gpu) { const int mem_size = nx*ny*nz*sizeof(float); float *d_idata, *d_tdata; hipMalloc(&d_tdata, mem_size); hipMalloc(&d_idata, mem_size); if (data_is_on_gpu) { hipMemcpy(d_idata, data, mem_size, hipMemcpyDeviceToDevice); } else { hipMemcpy(d_idata, data, mem_size, hipMemcpyHostToDevice); } fwt_1D(&d_idata, lvlx, nx, ny*nz); transpose(d_tdata, d_idata, nx, ny*nz); fwt_1D(&d_tdata, lvly, ny, nz*nx); transpose(d_idata, d_tdata, ny, nz*nx); fwt_1D(&d_idata, lvlz, nz, nx*ny); if (data_is_on_gpu) { hipMemcpy(data, d_idata, mem_size, hipMemcpyDeviceToDevice); } else { hipMemcpy(data, d_idata, mem_size, hipMemcpyDeviceToHost); } hipFree(d_idata); //hipDeviceSynchronize(); //printf("comp: %s\n",hipGetErrorString(hipGetLastError())); hipFree(d_tdata); } extern "C" void wavelet_cuda_3d_back(float *data, const unsigned nx, const unsigned ny, const unsigned nz, const unsigned lvlx, const unsigned lvly, const unsigned lvlz) { const int mem_size = nx*ny*nz*sizeof(float); float *d_idata, *d_tdata; hipMalloc(&d_idata, mem_size); hipMalloc(&d_tdata, mem_size); hipMemcpy(d_idata, data, mem_size, hipMemcpyHostToDevice); iwt_1D(&d_idata, lvlz, nz, nx*ny); transpose(d_tdata, d_idata, nz*nx, ny); iwt_1D(&d_tdata, lvly, ny, nz*nx); transpose(d_idata, d_tdata, ny*nz, nx); iwt_1D(&d_idata, lvlx, nx, ny*nz); hipMemcpy(data, d_idata, mem_size, hipMemcpyDeviceToHost); //hipDeviceSynchronize(); //printf("ucomp: %s\n",hipGetErrorString(hipGetLastError())); hipFree(d_idata); hipFree(d_tdata); }
b1ee9a87801b7ac86c250be97e727ac5dd3b8c3b.cu
/* * * Based on Nvidia convolution separable example. * * * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cstdio> #include <cuda.h> #include <assert.h> #include "../../cucheck.h" #include "wavelet.h" //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel[KERNEL_LENGTH*2]; //////////////////////////////////////////////////////////////////////////////// // Row convolution with Low and Hi pass filter //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 16 #define ROWS_RESULT_STEPS 1 //8 #define ROWS_HALO_STEPS 1 __global__ void convolutionRowsMirrorHiLoKernel(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int fidx = threadIdx.x % 2; d_Src += baseY * pitch + baseX; const int half = (baseY * pitch + (blockIdx.x * ROWS_BLOCKDIM_X + threadIdx.x))/2; d_Dst += half+(fidx*(imageH*pitch)/2) - ROWS_HALO_STEPS * ROWS_BLOCKDIM_X; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { // If HALO is > 1 maybe d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; is not correct for every ROW_HALO_STEP s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - (threadIdx.x+1)*2]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[fidx * KERNEL_LENGTH + KERNEL_RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } __global__ void invConvolutionRowsMirrorHiLoKernel(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int fidx = threadIdx.x % 2; const int half = (baseY * pitch + (blockIdx.x * ROWS_BLOCKDIM_X + threadIdx.x))/2; d_Src += half+(fidx*(imageH*pitch)/2) - ROWS_HALO_STEPS * ROWS_BLOCKDIM_X; d_Dst += baseY * pitch + baseX; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { // If HALO is > 1 maybe d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; is not correct for every ROW_HALO_STEP s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - baseX*2]; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : d_Src[i * ROWS_BLOCKDIM_X - (threadIdx.x+1)*2]; } //Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) { sum += c_Kernel[fidx * KERNEL_LENGTH + KERNEL_RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } void fwt_1D(float **data, const unsigned level, const unsigned nx, const unsigned ny) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(nx % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(ny % ROWS_BLOCKDIM_Y == 0); const int mem_size = nx*ny*sizeof(float); float *data1, *data2, *aux; data1 = *data; cudaMalloc(&data2, mem_size); unsigned w = nx; dim3 blocks(nx / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), ny / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsMirrorHiLoKernel<<<blocks, threads>>>(data2, data1, w, ny, w); CUCHECK(cudaGetLastError()); for (unsigned i = 1; i < level; i++) { blocks.x /= 2; w /= 2; aux = data2; data2 = data1; data1 = aux; cudaMemcpy(data2+w*ny, data1+w*ny, w*ny*sizeof(float), cudaMemcpyDeviceToDevice); convolutionRowsMirrorHiLoKernel<<<blocks, threads>>>(data2, data1, w, ny, w); CUCHECK(cudaGetLastError()); } *data = data2; cudaFree(data1); printf("Rows fwt_1D: %s\n",cudaGetErrorString(cudaGetLastError())); } void iwt_1D(float **data, const unsigned level, const unsigned nx, const unsigned ny) { assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS); assert(nx % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(ny % ROWS_BLOCKDIM_Y == 0); const int mem_size = nx*ny*sizeof(float); float *data1, *data2, *aux; data1 = *data; cudaMalloc(&data2, mem_size); unsigned w = nx >> (level-1); dim3 blocks(w / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), ny / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); invConvolutionRowsMirrorHiLoKernel<<<blocks, threads>>>(data2, data1, w, ny, w); CUCHECK(cudaGetLastError()); for (unsigned i = 1; i < level; i++) { cudaMemcpy(data2+w*ny, data1+w*ny, (nx-w)*ny*sizeof(float), cudaMemcpyDeviceToDevice); blocks.x *= 2; w *= 2; aux = data2; data2 = data1; data1 = aux; invConvolutionRowsMirrorHiLoKernel<<<blocks, threads>>>(data2, data1, w, ny, w); CUCHECK(cudaGetLastError()); } *data = data2; cudaFree(data1); printf("Rows iwt_1D: %s\n",cudaGetErrorString(cudaGetLastError())); } //////////////////////////////////////////////////////////////////////////////// // Transpose //////////////////////////////////////////////////////////////////////////////// const int TILE_DIM = 32; const int BLOCK_ROWS = 8; __global__ void transposeDiagonal(float *odata, const float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } int xIndex = blockIdx_x*TILE_DIM + threadIdx.x; int yIndex = blockIdx_y*TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y*TILE_DIM + threadIdx.x; yIndex = blockIdx_x*TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; 
i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } void transpose(float *tdata, const float *idata, const unsigned nx, const unsigned ny) { dim3 grid(nx/TILE_DIM, ny/TILE_DIM); dim3 threads(TILE_DIM,BLOCK_ROWS); transposeDiagonal<<<grid, threads>>>(tdata, idata, nx, ny); CUCHECK(cudaDeviceSynchronize()); } extern "C" void setUpFilter(const float *filter){ cudaMemcpyToSymbol(c_Kernel, filter, KERNEL_LENGTH*2 * sizeof(float)); printf("Setup: %s\n",cudaGetErrorString(cudaGetLastError())); } extern "C" void fwt_1D_GPU(float *data, const unsigned level, const unsigned nx, const unsigned ny) { const int mem_size = nx*ny*sizeof(float); float *d_idata; cudaMalloc(&d_idata, mem_size); cudaMemcpy(d_idata, data, mem_size, cudaMemcpyHostToDevice); fwt_1D(&d_idata, level, nx, ny); cudaMemcpy(data, d_idata, mem_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("FWT_GPU: %s\n",cudaGetErrorString(cudaGetLastError())); cudaFree(d_idata); } extern "C" void iwt_1D_GPU(float *data, const unsigned level, const unsigned nx, const unsigned ny) { const int mem_size = nx*ny*sizeof(float); float *d_idata; cudaMalloc(&d_idata, mem_size); cudaMemcpy(d_idata, data, mem_size, cudaMemcpyHostToDevice); iwt_1D(&d_idata, level, nx, ny); cudaMemcpy(data, d_idata, mem_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("IWT_GPU: %s\n",cudaGetErrorString(cudaGetLastError())); cudaFree(d_idata); } // data_dest: set this to the device address where the output data resides. // If this is not equal to 'data', then 'data' has been freed. extern "C" void wavelet_cuda_3d_fwd(float *data, const unsigned nx, const unsigned ny, const unsigned nz, const unsigned lvlx, const unsigned lvly, const unsigned lvlz, bool data_is_on_gpu) { const int mem_size = nx*ny*nz*sizeof(float); float *d_idata, *d_tdata; cudaMalloc(&d_tdata, mem_size); cudaMalloc(&d_idata, mem_size); if (data_is_on_gpu) { cudaMemcpy(d_idata, data, mem_size, cudaMemcpyDeviceToDevice); } else { cudaMemcpy(d_idata, data, mem_size, cudaMemcpyHostToDevice); } fwt_1D(&d_idata, lvlx, nx, ny*nz); transpose(d_tdata, d_idata, nx, ny*nz); fwt_1D(&d_tdata, lvly, ny, nz*nx); transpose(d_idata, d_tdata, ny, nz*nx); fwt_1D(&d_idata, lvlz, nz, nx*ny); if (data_is_on_gpu) { cudaMemcpy(data, d_idata, mem_size, cudaMemcpyDeviceToDevice); } else { cudaMemcpy(data, d_idata, mem_size, cudaMemcpyDeviceToHost); } cudaFree(d_idata); //cudaDeviceSynchronize(); //printf("comp: %s\n",cudaGetErrorString(cudaGetLastError())); cudaFree(d_tdata); } extern "C" void wavelet_cuda_3d_back(float *data, const unsigned nx, const unsigned ny, const unsigned nz, const unsigned lvlx, const unsigned lvly, const unsigned lvlz) { const int mem_size = nx*ny*nz*sizeof(float); float *d_idata, *d_tdata; cudaMalloc(&d_idata, mem_size); cudaMalloc(&d_tdata, mem_size); cudaMemcpy(d_idata, data, mem_size, cudaMemcpyHostToDevice); iwt_1D(&d_idata, lvlz, nz, nx*ny); transpose(d_tdata, d_idata, nz*nx, ny); iwt_1D(&d_tdata, lvly, ny, nz*nx); transpose(d_idata, d_tdata, ny*nz, nx); iwt_1D(&d_idata, lvlx, nx, ny*nz); cudaMemcpy(data, d_idata, mem_size, cudaMemcpyDeviceToHost); //cudaDeviceSynchronize(); //printf("ucomp: %s\n",cudaGetErrorString(cudaGetLastError())); cudaFree(d_idata); cudaFree(d_tdata); }
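/*
 * Usage sketch, separate from the files above. It assumes KERNEL_LENGTH comes from wavelet.h and
 * that the buffer passed to setUpFilter holds the low-pass coefficients in its first
 * KERNEL_LENGTH entries and the high-pass coefficients in the next KERNEL_LENGTH entries, which
 * matches how the row kernels index c_Kernel (fidx * KERNEL_LENGTH + KERNEL_RADIUS + j).
 * The image size, level count and zero-filled filter below are placeholders, not values taken
 * from the original code.
 */
#include <vector>
#include "wavelet.h"   // for KERNEL_LENGTH

extern "C" void setUpFilter(const float *filter);
extern "C" void fwt_1D_GPU(float *data, const unsigned level, const unsigned nx, const unsigned ny);
extern "C" void iwt_1D_GPU(float *data, const unsigned level, const unsigned nx, const unsigned ny);

void example_row_wavelet()
{
    // nx must be a multiple of ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X (16 with the settings above)
    // and ny a multiple of ROWS_BLOCKDIM_Y (16); fwt_1D/iwt_1D assert this.
    const unsigned nx = 256, ny = 256, level = 3;

    std::vector<float> filter(2 * KERNEL_LENGTH, 0.0f); // [low-pass | high-pass], placeholder values
    setUpFilter(filter.data());

    std::vector<float> image(nx * ny, 0.0f); // host data, transformed in place by the calls below
    fwt_1D_GPU(image.data(), level, nx, ny); // forward row transform over 'level' octaves
    iwt_1D_GPU(image.data(), level, nx, ny); // inverse row transform back
}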
6dd23bf44057bdd59b59864db8da07ad73a5ad36.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); } // namespace caffe
6dd23bf44057bdd59b59864db8da07ad73a5ad36.cu
#include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); } // namespace caffe
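/*
 * Minimal sketch of the synchronization pattern used above, written with plain CUDA calls
 * instead of Caffe's wrappers (Batch, caffe_copy and CUDA_CHECK are Caffe-specific and not
 * reproduced here; the function and buffer names below are assumptions). The point is that the
 * device copy must have completed before the batch buffer is handed back to the prefetch
 * thread, otherwise the next batch could be written into it while it is still being read.
 */
#include <cstddef>
#include <cuda_runtime.h>

void copy_then_recycle(const float *d_batch, float *d_top, size_t count)
{
    cudaMemcpyAsync(d_top, d_batch, count * sizeof(float),
                    cudaMemcpyDeviceToDevice, cudaStreamDefault);
    cudaStreamSynchronize(cudaStreamDefault); // only after this is d_batch safe to refill
    // ...push the batch back onto the free queue here...
}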
476913ba1d2a2b4041cf20ae68f4358ce2d8e077.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in DAC'10 paper "An Effective GPU Implementation of Breadth-First Search" Copyright (c) 2010 University of Illinois at Urbana-Champaign. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Author: Lijiuan Luo ([email protected]) ************************************************************************************/ #ifndef _KERNEL_H_ #define _KERNEL_H_ /********** Define colors for BFS 1) the definition of White, gray and black comes from the text book "Introduction to Algorithms" 2) For path search problems, people may choose to use different colors to record the found paths. Therefore we reserve numbers (0-16677216) for this purpose. Only nodes with colors bigger than UP_LIMIT are free to visit 3) We define two gray shades to differentiate between the new frontier nodes and the old frontier nodes that have not been marked BLACK *************/ #define UP_LIMIT 16677216//2^24 #define WHITE 16677217 #define GRAY 16677218 #define GRAY0 16677219 #define GRAY1 16677220 #define BLACK 16677221 #include "config.h" texture<Node> g_graph_node_ref; texture<Edge> g_graph_edge_ref; volatile __device__ int count = 0; volatile __device__ int no_of_nodes_vol = 0; volatile __device__ int stay_vol = 0; /***************************************************************************** This is the most general version of BFS kernel, i.e. no assumption about #block in the grid \param q1: the array to hold the current frontier \param q2: the array to hold the new frontier \param g_graph_nodes: the nodes in the input graph \param g_graph_edges: the edges i nthe input graph \param g_color: the colors of nodes \param g_cost: the costs of nodes \param no_of_nodes: the number of nodes in the current frontier \param tail: pointer to the location of the tail of the new frontier. *tail is the size of the new frontier \param gray_shade: the shade of the gray in current BFS propagation. See GRAY0, GRAY1 macro definitions for more details \param k: the level of current propagation in the BFS tree. k= 0 for the first propagation. 
***********************************************************************/ __global__ void BFS_kernel(int * q1, int * q2, Node* g_graph_nodes, Edge* g_graph_edges, int* g_color, int * g_cost, int no_of_nodes, int * tail, int gray_shade, int k) { __shared__ int local_q_tail;//the tails of each local warp-level queue __shared__ int local_q[NUM_BIN*W_QUEUE_SIZE];//the local warp-level queues //current w-queue, a.k.a prefix sum __shared__ int shift; if(threadIdx.x == 0){ local_q_tail = 0;//initialize the tail of w-queue } __syncthreads(); //first, propagate and add the new frontier elements into w-queues int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; if( tid<no_of_nodes) { int pid = q1[tid]; //the current frontier node, or the parent node of the new frontier nodes g_color[pid] = BLACK; int cur_cost = g_cost[pid]; //into Node cur_node = tex1Dfetch(g_graph_node_ref,pid); for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++)//visit each neighbor of the //current frontier node. { Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i); int id = cur_edge.x; int cost = cur_edge.y; cost += cur_cost; int orig_cost = atomicMin(&g_cost[id],cost); if(orig_cost > cost){//the node should be visited if(g_color[id] > UP_LIMIT){ int old_color = atomicExch(&g_color[id],gray_shade); //this guarantees that only one thread will push this node //into a queue if(old_color != gray_shade) { //atomic operation guarantees the correctness //even if multiple warps are executing simultaneously int index = atomicAdd(&local_q_tail,1); local_q[index] = id; } } } } } __syncthreads(); if(threadIdx.x == 0){ int tot_sum = local_q_tail; //the offset or "shift" of the block-level queue within the grid-level queue //is determined by atomic operation shift = atomicAdd(tail,tot_sum); } __syncthreads(); int local_shift = threadIdx.x;//shift within a w-queue //loop unrolling was originally used for better performance, but removed for better readability while(local_shift < local_q_tail){ q2[shift + local_shift] = local_q[local_shift]; local_shift += blockDim.x;//multiple threads are copying elements at the same time, //so we shift by multiple elements for next iteration } } #endif
476913ba1d2a2b4041cf20ae68f4358ce2d8e077.cu
/*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in DAC'10 paper "An Effective GPU Implementation of Breadth-First Search" Copyright (c) 2010 University of Illinois at Urbana-Champaign. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Author: Lijiuan Luo ([email protected]) ************************************************************************************/ #ifndef _KERNEL_H_ #define _KERNEL_H_ /********** Define colors for BFS 1) the definition of White, gray and black comes from the text book "Introduction to Algorithms" 2) For path search problems, people may choose to use different colors to record the found paths. Therefore we reserve numbers (0-16677216) for this purpose. Only nodes with colors bigger than UP_LIMIT are free to visit 3) We define two gray shades to differentiate between the new frontier nodes and the old frontier nodes that have not been marked BLACK *************/ #define UP_LIMIT 16677216//2^24 #define WHITE 16677217 #define GRAY 16677218 #define GRAY0 16677219 #define GRAY1 16677220 #define BLACK 16677221 #include "config.h" texture<Node> g_graph_node_ref; texture<Edge> g_graph_edge_ref; volatile __device__ int count = 0; volatile __device__ int no_of_nodes_vol = 0; volatile __device__ int stay_vol = 0; /***************************************************************************** This is the most general version of BFS kernel, i.e. no assumption about #block in the grid \param q1: the array to hold the current frontier \param q2: the array to hold the new frontier \param g_graph_nodes: the nodes in the input graph \param g_graph_edges: the edges i nthe input graph \param g_color: the colors of nodes \param g_cost: the costs of nodes \param no_of_nodes: the number of nodes in the current frontier \param tail: pointer to the location of the tail of the new frontier. *tail is the size of the new frontier \param gray_shade: the shade of the gray in current BFS propagation. See GRAY0, GRAY1 macro definitions for more details \param k: the level of current propagation in the BFS tree. k= 0 for the first propagation. ***********************************************************************/ __global__ void BFS_kernel(int * q1, int * q2, Node* g_graph_nodes, Edge* g_graph_edges, int* g_color, int * g_cost, int no_of_nodes, int * tail, int gray_shade, int k) { __shared__ int local_q_tail;//the tails of each local warp-level queue __shared__ int local_q[NUM_BIN*W_QUEUE_SIZE];//the local warp-level queues //current w-queue, a.k.a prefix sum __shared__ int shift; if(threadIdx.x == 0){ local_q_tail = 0;//initialize the tail of w-queue } __syncthreads(); //first, propagate and add the new frontier elements into w-queues int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x; if( tid<no_of_nodes) { int pid = q1[tid]; //the current frontier node, or the parent node of the new frontier nodes g_color[pid] = BLACK; int cur_cost = g_cost[pid]; //into Node cur_node = tex1Dfetch(g_graph_node_ref,pid); for(int i=cur_node.x; i<cur_node.y + cur_node.x; i++)//visit each neighbor of the //current frontier node. 
{ Edge cur_edge = tex1Dfetch(g_graph_edge_ref,i); int id = cur_edge.x; int cost = cur_edge.y; cost += cur_cost; int orig_cost = atomicMin(&g_cost[id],cost); if(orig_cost > cost){//the node should be visited if(g_color[id] > UP_LIMIT){ int old_color = atomicExch(&g_color[id],gray_shade); //this guarantees that only one thread will push this node //into a queue if(old_color != gray_shade) { //atomic operation guarantees the correctness //even if multiple warps are executing simultaneously int index = atomicAdd(&local_q_tail,1); local_q[index] = id; } } } } } __syncthreads(); if(threadIdx.x == 0){ int tot_sum = local_q_tail; //the offset or "shift" of the block-level queue within the grid-level queue //is determined by atomic operation shift = atomicAdd(tail,tot_sum); } __syncthreads(); int local_shift = threadIdx.x;//shift within a w-queue //loop unrolling was originally used for better performance, but removed for better readability while(local_shift < local_q_tail){ q2[shift + local_shift] = local_q[local_shift]; local_shift += blockDim.x;//multiple threads are copying elements at the same time, //so we shift by multiple elements for next iteration } } #endif
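/*
 * Host-side driver sketch, separate from the kernel file above (the real driver is not shown
 * here). It illustrates one typical way BFS_kernel is invoked level by level: ping-ponging the
 * two frontier queues, reading back *tail as the size of the new frontier, and alternating the
 * two gray shades. The names d_q1/d_q2/d_tail, the block-count arithmetic and the surrounding
 * setup are assumptions made for this sketch; MAX_THREADS_PER_BLOCK comes from config.h, and
 * g_graph_node_ref / g_graph_edge_ref are assumed to be already bound to the node/edge arrays.
 */
void bfs_driver_sketch(int *d_q1, int *d_q2, Node *d_nodes, Edge *d_edges,
                       int *d_color, int *d_cost, int *d_tail)
{
    int frontier_size = 1;            // d_q1 is assumed to already hold the source node
    int gray_shade = GRAY0;
    for (int k = 0; frontier_size > 0; k++)
    {
        int zero = 0;
        cudaMemcpy(d_tail, &zero, sizeof(int), cudaMemcpyHostToDevice); // reset new-frontier size
        int num_blocks = (frontier_size + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK;
        BFS_kernel<<<num_blocks, MAX_THREADS_PER_BLOCK>>>(d_q1, d_q2, d_nodes, d_edges,
                                                          d_color, d_cost, frontier_size,
                                                          d_tail, gray_shade, k);
        cudaMemcpy(&frontier_size, d_tail, sizeof(int), cudaMemcpyDeviceToHost); // new frontier size
        int *tmp = d_q1; d_q1 = d_q2; d_q2 = tmp;               // the new frontier becomes current
        gray_shade = (gray_shade == GRAY0) ? GRAY1 : GRAY0;     // mark the next level differently
    }
}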
df0b5edc78c6ff75e5234252b10a8edf7a14242e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #define O_TILE_WIDTH 8 #define BLOCK_WIDTH (O_TILE_WIDTH + 8) __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
// Note: this operation is kind of convolution int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*O_TILE_WIDTH + ty; int col_o = blockIdx.x*O_TILE_WIDTH + tx; // We need load more data into shared memory to do calculation // we need load (O_TILE_WIDTH + filterWidth-1)^2 data into shared memeory, filterWidth is 9 here // to calculate an output at row_o, we should begin at row_o-(filterWidth/2) int row_i = row_o - 4; int col_i = col_o - 4; // Fast memory access // load all data into shared memory __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH]; //clamp to boundary of the image row_i = min(max(row_i,0),numRows-1); col_i = min(max(col_i,0),numCols-1); int offset = row_i*numCols + col_i; Ns[ty][tx] = inputChannel[offset]; // make sure that all needed data are loaded into shared memory __syncthreads(); // do calculation, only O_TILE_WIDTH*O_TILE_WIDTH threads participate in calculating outputs // only safe threads participate in writing output float data = 0.0f; if( tx < O_TILE_WIDTH && ty < O_TILE_WIDTH && row_o < numRows && col_o < numCols){ for(int i = 0; i < filterWidth; i++) for(int j = 0; j < filterWidth; j++) data += filter[i*filterWidth+j] * Ns[i+ty][j+tx]; //if(row_o < numRows && col_o < numCols) outputChannel[row_o*numCols+col_o] = data; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int row = blockIdx.y*O_TILE_WIDTH + threadIdx.y; int col = blockIdx.x*O_TILE_WIDTH + threadIdx.x; // do calculation, only O_TILE_WIDTH*O_TILE_WIDTH threads participate in calculating outputs // only safe threads participate in writing output if( threadIdx.y < O_TILE_WIDTH && threadIdx.x < O_TILE_WIDTH && (row < numRows) && (col < numCols) ){ // Note: this operation is kind of transpose pattern // we set block number and size under the 1-channel position, so each thread should load 3 elements // ignore the 4th channel (alpha value) int offset = row*numCols + col; redChannel[offset] = inputImageRGBA[offset].x; greenChannel[offset] = inputImageRGBA[offset].y; blueChannel[offset] = inputImageRGBA[offset].z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { int row = blockIdx.y*O_TILE_WIDTH + threadIdx.y; int col = blockIdx.x*O_TILE_WIDTH + threadIdx.x; // do calculation, only O_TILE_WIDTH*O_TILE_WIDTH threads participate in calculating outputs // only safe threads participate in writing output if( threadIdx.y < O_TILE_WIDTH && threadIdx.x < O_TILE_WIDTH && (row < numRows) && (col < numCols) ){ // Note: this operation is kind of transpose pattern const int thread_1D_pos = row*numCols + col; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) // consider this as 2-D convolution operation with 2-D MASK, here i used tile-convolution // We need load enough elements into shared memory in kernel function, so the width of one block is the tile width + mask with - 1 const dim3 blockSize(BLOCK_WIDTH,BLOCK_WIDTH); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. // We need enough block to cover all pixels in a image const dim3 gridSize( numCols/O_TILE_WIDTH+1, numRows/O_TILE_WIDTH+1, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); // free filter memory checkCudaErrors(hipFree(d_filter)); }
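/*
 * Sizing note, separate from the file above: the constants hard-coded in the kernels assume
 * filterWidth == 9. In general this tiled pattern needs a block edge of
 * O_TILE_WIDTH + filterWidth - 1 threads (8 + 9 - 1 = 16 = BLOCK_WIDTH here) and an input offset
 * of filterWidth / 2 (the "- 4" applied to row_i/col_i). The helper below only restates that
 * arithmetic; its name and signature are assumptions made for this sketch.
 */
static void blur_tile_dims(int numRows, int numCols, int o_tile_width, int filterWidth,
                           int *block_edge, int *grid_x, int *grid_y)
{
    *block_edge = o_tile_width + filterWidth - 1;  // threads per block edge: output tile + halo
    *grid_x = numCols / o_tile_width + 1;          // output tiles needed to cover the image width
    *grid_y = numRows / o_tile_width + 1;          // output tiles needed to cover the image height
}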
df0b5edc78c6ff75e5234252b10a8edf7a14242e.cu
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
//  1) RRRRRRRR...
//  2) GGGGGGGG...
//  3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
//  0.0  0.2  0.0
//  0.2  0.2  0.2
//  0.0  0.2  0.0
//
// Image (note that we align the array of weights to the center of the box):
//
//    1  2  5  2  0  3
//       -------
//    3 |2  5  1| 6  0       0.0*2 + 0.2*5 + 0.0*1 +
//      |       |
//    4 |3  6  2| 1  4   ->  0.2*3 + 0.2*6 + 0.2*2 +   ->  3.2
//      |       |
//    0 |4  0  3| 4  2       0.0*4 + 0.2*0 + 0.0*3
//       -------
//    9  6  5  0  3  9
//
//          (1)                        (2)                  (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
//
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************

#include "utils.h"

#define O_TILE_WIDTH 8
#define BLOCK_WIDTH (O_TILE_WIDTH + 8)

__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth)
{
  // TODO
  // NOTE: Be sure to compute any intermediate results in floating point
  // before storing the final result as unsigned char.

  // NOTE: Be careful not to try to access memory that is outside the bounds of
  // the image. You'll want code that performs the following check before accessing
  // GPU memory:
  //
  // if ( absolute_image_position_x >= numCols ||
  //      absolute_image_position_y >= numRows )
  // {
  //     return;
  // }

  // NOTE: If a thread's absolute 2D position is within the image, but some of
  // its neighbors are outside the image, then you will need to be extra careful. Instead
  // of trying to read such a neighbor value from GPU memory (which won't work because
  // the value is out of bounds), you should explicitly clamp the neighbor values you read
  // to be within the bounds of the image. If this is not clear to you, then please refer
  // to the sequential reference solution for the exact clamping semantics you should follow.
// Note: this operation is kind of convolution int tx = threadIdx.x; int ty = threadIdx.y; int row_o = blockIdx.y*O_TILE_WIDTH + ty; int col_o = blockIdx.x*O_TILE_WIDTH + tx; // We need load more data into shared memory to do calculation // we need load (O_TILE_WIDTH + filterWidth-1)^2 data into shared memeory, filterWidth is 9 here // to calculate an output at row_o, we should begin at row_o-(filterWidth/2) int row_i = row_o - 4; int col_i = col_o - 4; // Fast memory access // load all data into shared memory __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH]; //clamp to boundary of the image row_i = min(max(row_i,0),numRows-1); col_i = min(max(col_i,0),numCols-1); int offset = row_i*numCols + col_i; Ns[ty][tx] = inputChannel[offset]; // make sure that all needed data are loaded into shared memory __syncthreads(); // do calculation, only O_TILE_WIDTH*O_TILE_WIDTH threads participate in calculating outputs // only safe threads participate in writing output float data = 0.0f; if( tx < O_TILE_WIDTH && ty < O_TILE_WIDTH && row_o < numRows && col_o < numCols){ for(int i = 0; i < filterWidth; i++) for(int j = 0; j < filterWidth; j++) data += filter[i*filterWidth+j] * Ns[i+ty][j+tx]; //if(row_o < numRows && col_o < numCols) outputChannel[row_o*numCols+col_o] = data; } } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int row = blockIdx.y*O_TILE_WIDTH + threadIdx.y; int col = blockIdx.x*O_TILE_WIDTH + threadIdx.x; // do calculation, only O_TILE_WIDTH*O_TILE_WIDTH threads participate in calculating outputs // only safe threads participate in writing output if( threadIdx.y < O_TILE_WIDTH && threadIdx.x < O_TILE_WIDTH && (row < numRows) && (col < numCols) ){ // Note: this operation is kind of transpose pattern // we set block number and size under the 1-channel position, so each thread should load 3 elements // ignore the 4th channel (alpha value) int offset = row*numCols + col; redChannel[offset] = inputImageRGBA[offset].x; greenChannel[offset] = inputImageRGBA[offset].y; blueChannel[offset] = inputImageRGBA[offset].z; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { int row = blockIdx.y*O_TILE_WIDTH + threadIdx.y; int col = blockIdx.x*O_TILE_WIDTH + threadIdx.x; // do calculation, only O_TILE_WIDTH*O_TILE_WIDTH threads participate in calculating outputs // only safe threads participate in writing output if( threadIdx.y < O_TILE_WIDTH && threadIdx.x < O_TILE_WIDTH && (row < numRows) && (col < numCols) ){ // Note: this operation is kind of transpose pattern const int thread_1D_pos = row*numCols + col; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) // consider this as 2-D convolution operation with 2-D MASK, here i used tile-convolution // We need load enough elements into shared memory in kernel function, so the width of one block is the tile width + mask with - 1 const dim3 blockSize(BLOCK_WIDTH,BLOCK_WIDTH); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. // We need enough block to cover all pixels in a image const dim3 gridSize( numCols/O_TILE_WIDTH+1, numRows/O_TILE_WIDTH+1, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); // free filter memory checkCudaErrors(cudaFree(d_filter)); }
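The homework header above tells students to wrap every allocation and copy in checkCudaErrors(), but the utils.h that defines it is not part of this listing. As an assumption (a sketch, not the course's actual utils.h), such a wrapper is typically a thin macro around the CUDA runtime error codes:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for the checkCudaErrors() used above: run a CUDA runtime call,
// and on failure print the decoded error string with file/line context and abort.
#define checkCudaErrors(call)                                                 \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error '%s' at %s:%d\n",                           \
              cudaGetErrorString(err_), __FILE__, __LINE__);                  \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)

// Usage, as in allocateMemoryAndCopyToGPU() above:
//   checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));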
d1433b8c8a9fc2d48ee491feef6726a1b5c8c3a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/helpers.h> #include <ops/declarable/helpers/hamming.h> namespace sd { namespace ops { namespace helpers { template <typename X, typename Z> static _CUDA_G void _hammingKernel(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, void *vz, void *reductionBuffer, Nd4jLong length) { auto x = reinterpret_cast<X*>(vx); auto y = reinterpret_cast<X*>(vy); auto z = reinterpret_cast<Z*>(vz); __shared__ Nd4jLong *shared; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shared = reinterpret_cast<Nd4jLong*>(shmem); } __syncthreads(); // we want to nullify temporary memory before accumulating intermediate results shared[threadIdx.x] = 0; auto tid = threadIdx.x + blockIdx.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) { auto _x = static_cast<unsigned long long>(x[shape::getIndexOffset(e, xShapeInfo)]); auto _y = static_cast<unsigned long long>(y[shape::getIndexOffset(e, yShapeInfo)]); // we save intermediate result into shared memory shared[threadIdx.x] += __popcll(_x ^ _y); } __syncthreads(); // now we accumulate values auto numItems = sd::math::nd4j_min<Nd4jLong>(blockDim.x, length); auto floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (threadIdx.x >= floorPow2) shared[threadIdx.x - floorPow2] = shared[threadIdx.x - floorPow2] + shared[threadIdx.x]; __syncthreads(); } __syncthreads(); for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < numItems) shared[threadIdx.x] = shared[threadIdx.x] + shared[threadIdx.x + activeThreads]; __syncthreads(); } __syncthreads(); // FIXME: do we really want atomicAdd on global memory here // and store them to output if (threadIdx.x == 0 && shared[0] > 0) sd::math::atomics::nd4j_atomicAdd<Z>(&z[0], static_cast<Z>(shared[threadIdx.x])); } template <typename X, typename Z> static void _hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &z) { hipLaunchKernelGGL(( _hammingKernel<X, Z>), dim3(256), dim3(256), 256 * sizeof(Nd4jLong) + 256, *context->getCudaStream(), x.specialBuffer(), x.specialShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.specialBuffer(), nullptr, x.lengthOf()); } void hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &output) { NDArray::prepareSpecialUse({&output}, {&x, &y}); BUILD_DOUBLE_SELECTOR(x.dataType(), output.dataType(), _hamming, (context, x, y, output), INTEGER_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&x, &y}); } } } }
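Apart from the added hip/hip_runtime.h include and the hipify banner, this .hip version differs from the .cu original below only in the kernel-launch syntax: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL. A minimal sketch of the mapping (scaleKernel is a hypothetical example, not part of this file):

// Hypothetical toy kernel, used only to show the launch-syntax rewrite.
__global__ void scaleKernel(float *data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

// CUDA launch (as in the .cu twin of this file):
//   scaleKernel<<<gridDim, blockDim, sharedBytes, stream>>>(d_data, 2.0f, n);
//
// Equivalent HIP launch as emitted by hipify (the form used in this .hip file):
//   hipLaunchKernelGGL(scaleKernel, gridDim, blockDim, sharedBytes, stream,
//                      d_data, 2.0f, n);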
d1433b8c8a9fc2d48ee491feef6726a1b5c8c3a3.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/helpers.h> #include <ops/declarable/helpers/hamming.h> namespace sd { namespace ops { namespace helpers { template <typename X, typename Z> static _CUDA_G void _hammingKernel(void *vx, Nd4jLong *xShapeInfo, void *vy, Nd4jLong *yShapeInfo, void *vz, void *reductionBuffer, Nd4jLong length) { auto x = reinterpret_cast<X*>(vx); auto y = reinterpret_cast<X*>(vy); auto z = reinterpret_cast<Z*>(vz); __shared__ Nd4jLong *shared; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shared = reinterpret_cast<Nd4jLong*>(shmem); } __syncthreads(); // we want to nullify temporary memory before accumulating intermediate results shared[threadIdx.x] = 0; auto tid = threadIdx.x + blockIdx.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += blockDim.x * gridDim.x) { auto _x = static_cast<unsigned long long>(x[shape::getIndexOffset(e, xShapeInfo)]); auto _y = static_cast<unsigned long long>(y[shape::getIndexOffset(e, yShapeInfo)]); // we save intermediate result into shared memory shared[threadIdx.x] += __popcll(_x ^ _y); } __syncthreads(); // now we accumulate values auto numItems = sd::math::nd4j_min<Nd4jLong>(blockDim.x, length); auto floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1; if (threadIdx.x >= floorPow2) shared[threadIdx.x - floorPow2] = shared[threadIdx.x - floorPow2] + shared[threadIdx.x]; __syncthreads(); } __syncthreads(); for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < numItems) shared[threadIdx.x] = shared[threadIdx.x] + shared[threadIdx.x + activeThreads]; __syncthreads(); } __syncthreads(); // FIXME: do we really want atomicAdd on global memory here // and store them to output if (threadIdx.x == 0 && shared[0] > 0) sd::math::atomics::nd4j_atomicAdd<Z>(&z[0], static_cast<Z>(shared[threadIdx.x])); } template <typename X, typename Z> static void _hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &z) { _hammingKernel<X, Z><<<256, 256, 256 * sizeof(Nd4jLong) + 256, *context->getCudaStream()>>>(x.specialBuffer(), x.specialShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.specialBuffer(), nullptr, x.lengthOf()); } void hamming(LaunchContext *context, NDArray &x, NDArray &y, NDArray &output) { NDArray::prepareSpecialUse({&output}, {&x, &y}); BUILD_DOUBLE_SELECTOR(x.dataType(), output.dataType(), _hamming, (context, x, y, output), INTEGER_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&x, &y}); } } } }
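The kernel above computes the Hamming distance of two integer arrays by XOR-ing element pairs, counting set bits with __popcll, reducing the per-thread counts in shared memory, and issuing one atomicAdd per block. A stripped-down sketch of the same pattern in plain CUDA (illustrative names, fixed 256-thread blocks, power-of-two reduction only; not the libnd4j implementation above):

#include <cuda_runtime.h>

__global__ void hammingDistanceSketch(const unsigned long long *x,
                                      const unsigned long long *y,
                                      unsigned long long *result, int n) {
  __shared__ unsigned long long partial[256];   // assumes blockDim.x == 256

  // Grid-stride loop: each thread accumulates popcount(x ^ y) over its elements.
  unsigned long long local = 0;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x)
    local += __popcll(x[i] ^ y[i]);

  partial[threadIdx.x] = local;
  __syncthreads();

  // Standard power-of-two tree reduction within the block.
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride)
      partial[threadIdx.x] += partial[threadIdx.x + stride];
    __syncthreads();
  }

  // One atomic per block, mirroring the FIXME'd atomicAdd in the kernel above.
  if (threadIdx.x == 0)
    atomicAdd(result, partial[0]);
}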
8ae5fff28321736aba12012b70cba2cbc7d65daf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <GL/gl.h> #include <GL/glut.h> #include <math.h> #include <stdbool.h> #include <omp.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <hip/hip_runtime.h> #define PI 3.141592653589793 #define cap 1000 #define ref 1.0 #define temp 4273 #define visc 9 #define GRAV (6.674*0.00000000000000000001) #define density (2.5 * 1000000000000) #define rad 100 #define dev 12 #define M (4 / 3 * PI * rad*rad*rad* density) #define X 0 #define Y 1 #define Z 2 #define ANIM 1000000 #define scale 0.01 #define colmargin 1.1 #define R (rad * scale) #define INIT_WIDTH 800 #define INIT_HEIGHT 800 #define vision 20 #define Grid_x 1 #define Grid_y 1 #define Grid_z 1 #define Block_x 4 #define Block_y 2 #define Block_z 1 #define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z) unsigned int num_points = (dev + 1) * (dev + 1); unsigned int window_width = 500; unsigned int window_height = 500; double init_left = -10000; double init_right = 10000; double init_bottom = -10000; double init_top = 10000; double left, right, bottom, top; float h_point[NUM_POINTS][3]; float v_point[NUM_POINTS][3]; float st_point[NUM_POINTS]; float e_point[NUM_POINTS]; float T_point[NUM_POINTS]; float J_point[NUM_POINTS]; float anim_time = ANIM; float anim_dt = 0.1; double phi = 30.0; double theta = 30.0; float light_pos[4]; int mouse_old_x, mouse_old_y; bool motion_p; double eye[3]; double center[3] = {0.0, 0.0, 0.0}; double up[3]; double ** point; float (*d_point)[3]; float (*dv_point)[3]; float (*dst_point); float (*de_point); float (*dT_point); float (*dJ_point); __global__ void grav_v(float (*pos)[3], float(*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt); __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt); texture<float,2> timetocol; texture<float,2> indextocol; // CUDA void bindTextures(float **coltime, float **colindex) { hipChannelFormatDesc desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipBindTexture(0, timetocol, coltime, desc); hipBindTexture(0, indextocol,colindex, desc); } // CUDA void unbindTextures(void) { hipUnbindTexture(timetocol); hipUnbindTexture(indextocol); } // double dot(double vec0[], double vec1[]) { return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]); } void cross(double vec0[], double vec1[], double vec2[]) { vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y]; vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z]; vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X]; } void normVec(double vec[]) { double norm; norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]); vec[X] /= norm; vec[Y] /= norm; vec[Z] /= norm; } void normal(double p0[], double p1[], double p2[], double normal[]) { unsigned int i; double v0[3], v1[3]; for (i = 0; i < 3; i++) { v0[i] = p2[i] - p1[i]; v1[i] = p0[i] - p1[i]; } cross(v0, v1, normal); normVec(normal); } // __global__ void grav_v(float (*pos)[3],float(*vec)[3],float(*sti),float(*e),float(*T),float(*J), float time, float dt) { double xn,yn,zn,vx,vy,vz,dis,sq; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; double v_buff[3]={0}; int colnum=0; double gravity=0; __shared__ float 
coltime[NUM_POINTS][NUM_POINTS][2]; for (int i = 0 ; i < NUM_POINTS; i++) { coltime[index][i][0]=0; coltime[index][i][0]=0; } __syncthreads(); xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; v_buff[0]=vx; v_buff[1]=vy; v_buff[2]=vz; for (int i = 0 ; i < NUM_POINTS; i++) { sq = pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = sqrt(sq); // if (dis > 2 * R * colmargin && i != index) { // J[index]-=0.5*M*fabs((float)(pow((double)vec[i][0],2)+pow((double)vec[i][1],2)+pow((double)vec[i][2],2))-(float)(pow((double)(vec[i][0] + ((pos[i][0]-xn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][1] + ((pos[i][1]-yn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][2] + ((pos[i][2]-zn)/dis)*gravity*ANIM),2))); // vx = vx + ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy = vy + ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz = vz + ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } else { if (i != index && coltime[index][i][1] == 0){ //TBD coltime[index][i][1]=(double)i+1; coltime[i][index][1]=(double)index+1; } } } __syncthreads(); for (int i = 0 ; i < NUM_POINTS; i++) { if(coltime[index][i][1] > 0){ coltime[index][i][0]=(float)(2*R*colmargin - dis)/(pow((double)(vx-vec[i][0]),2)+pow((double)(vy-vec[i][1]),2)+pow((double)(vz-vec[i][2]),2)); colnum++; } } __syncthreads(); if(colnum>0) { // float tmp[2]={0}; for (int i = 0 ; i < NUM_POINTS; i++){ for(int j = i+1; j < NUM_POINTS; j++){ if(coltime[index][i][0] > coltime[index][j][0]){ tmp[0]=coltime[index][i][0]; tmp[1]=coltime[index][i][1]; coltime[index][i][0]=coltime[index][j][0]; coltime[index][i][1]=coltime[index][j][1]; coltime[index][j][0]=tmp[0]; coltime[index][j][1]=tmp[1]; } } } // for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){ int colindex=coltime[index][i][1]-1; float repul=0; repul=e[index]; // if (e[colindex] < e[index]) { repul=e[colindex]; } // v_buff[0]=(double)((1+repul)*M*vec[colindex][0]+(M-repul*M)*v_buff[0])/(M+M); v_buff[1]=(double)((1+repul)*M*vec[colindex][1]+(M-repul*M)*v_buff[1])/(M+M); v_buff[2]=(double)((1+repul)*M*vec[colindex][2]+(M-repul*M)*v_buff[2])/(M+M); //m^2/3*sti //float Energy=0.5*(1-repul*repul)*(M*(pow((double)vx,2)+pow((double)vy,2)+pow((double)vz,2)) + M*(pow((double)vec[colindex][0],2)+pow((double)vec[colindex][1],2)+pow((double)vec[colindex][2],2))); //J[index]+=Energy * pow((double)M,0.667) * pow(10.0,(double)sti[index]) / (pow((double)M,0.667) * pow(10.0,(double)sti[index]) + pow((double)M,0.667) * pow(10.0,(double)sti[colindex])); //T[index]=(J[index]-0.5*M*(pow((double)v_buff[0],2)+pow((double)v_buff[1],2)+pow((double)v_buff[2],2)))/M/cap; // //e[index] = e[index] * exp((pow(10.0,(double)(visc-(T[index]-temp/100)))-pow(10.0,(double)sti[index]))/pow((double)M,0.5)); //sti[index] = visc - (T[index] - temp / 100); } } __syncthreads(); if (colnum>0) { vec[index][0] = (float)v_buff[0]; vec[index][1] = (float)v_buff[1]; vec[index][2] = (float)v_buff[2]; } else { vec[index][0] = (float)vx; vec[index][1] = (float)vy; vec[index][2] = (float)vz; } // //T[index]=(J[index]-0.5*M*(pow((double)vec[index][0],2)+pow((double)vec[index][1],2)+pow((double)vec[index][2],2)))/M/cap; // //e[index] = e[index] * exp((pow(10.0,(double)(visc-(T[index]-temp/100)))-pow(10.0,(double)sti[index]))/pow((double)M,0.5)); //sti[index] = visc - (T[index] - temp / 100); } // __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt) { double xn,yn,zn,vx,vy,vz; unsigned int thread_idx = 
threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; pos[index][0] = xn + vx * dt; pos[index][1] = yn + vy * dt; pos[index][2] = zn + vz * dt; } // void setInitialPosition(void) { srand(12131); for (int i = 0; i < NUM_POINTS; i++) { if(rand()%2==1) { h_point[i][0] = (double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][1] = (double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][2] = (double)rand() / RAND_MAX * INIT_WIDTH / 20; } else { h_point[i][0] = -(double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][1] = -(double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][2] = -(double)rand() / RAND_MAX * INIT_WIDTH / 20; } } for (int i = 0; i < NUM_POINTS; i++) { v_point[i][0] = 0; v_point[i][1] = 0; v_point[i][2] = 0; st_point[i]=visc; e_point[i]=ref; T_point[i]=temp; J_point[i]=cap*M*temp; } checkCudaErrors(hipMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dst_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&de_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dT_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float))); checkCudaErrors(hipMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dT_point, T_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , hipMemcpyHostToDevice)); } //CUDA void launchGPUKernel(unsigned int num_particles, float (*pos)[3], float (*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt) { dim3 grid(Grid_x,Grid_y,Grid_z); dim3 block(Block_x,Block_y,Block_z); hipLaunchKernelGGL(( grav_v), dim3(grid) , dim3(block), 0, 0, pos, vec, sti, e, T, J, time, dt); hipLaunchKernelGGL(( grav_p), dim3(grid) , dim3(block), 0, 0, pos, vec, time, dt); } // void runGPUKernel(void) { launchGPUKernel(NUM_POINTS, d_point, dv_point ,dst_point, de_point,dT_point,dJ_point, anim_time, anim_dt); checkCudaErrors(hipMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(T_point, dT_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , hipMemcpyDeviceToHost)); anim_time += anim_dt; } // void defineViewMatrix(double phi, double theta) { unsigned int i; double c, s, xy_dist; double x_axis[3], y_axis[3], z_axis[3]; // eye[Z] = sin(theta * PI 
/ 180.0); xy_dist = cos(theta * PI / 180.0); c = cos(phi * PI / 180.0); s = sin(phi * PI / 180.0); eye[X] = xy_dist * c; eye[Y] = xy_dist * s; up[X] = - c * eye[Z]; up[Y] = - s * eye[Z]; up[Z] = s * eye[Y] + c * eye[X]; normVec(up); // for (i = 0; i < 3; i++) { z_axis[i] = eye[i] - center[i]; } normVec(z_axis); cross(up, z_axis, x_axis); normVec(x_axis); cross(z_axis, x_axis, y_axis); gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]); } void display(void) { double nrml_vec[3]; light_pos[0] = (float)eye[X]; light_pos[1] = (float)eye[Y]; light_pos[2] = (float)eye[Z]; light_pos[3] = 0.0f; //CUDA runGPUKernel(); // glLightfv(GL_LIGHT0, GL_POSITION, light_pos); //glLightfv(GL_LIGHT0, GL_DIFFUSE, light_pos); glEnable(GL_LIGHTING); glMatrixMode(GL_PROJECTION); glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000); glLoadIdentity(); glOrtho(-vision, vision, -vision, vision, -1000, 1000); glViewport(0, 0, window_width, window_height); defineViewMatrix(phi, theta); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBegin(GL_QUADS); // TBD for (int k = 0 ; k < NUM_POINTS ; k++) { for (int i = 0 ; i < dev + 1 ; i ++) { for (int j = 0 ; j < 2 * dev + 1 ; j++) { normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec); glNormal3dv(nrml_vec); glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]); glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]); glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]); glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]); } } } glEnd(); glutSwapBuffers(); glutPostRedisplay(); } void mouse_button(int button, int state, int x, int y) { if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON)) motion_p = true; else if (state == GLUT_UP) motion_p = false; mouse_old_x = x; mouse_old_y = y; } void mouse_motion(int x, int y) { int dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (motion_p) { phi -= dx * 0.2; theta += dy * 0.2; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void resize (int width, int height) { window_width = width; window_height = height; } bool initGL(void) { glClearColor(0.0f, 0.0f , 0.0f, 0.5f); glEnable(GL_DEPTH_TEST); glClearDepth(1.0); glDepthFunc(GL_LESS); glEnable(GL_LIGHT0); return true; } int main(int argc, char** argv) { double yangle,zangle; double r; point = (double **)malloc(sizeof(double *) * num_points); for (int i = 0 ; i < num_points ; i++) { point[i] = (double *)malloc(sizeof(double) * 3); } for (int i = 0 ; i < dev + 1; i ++) { zangle = i * PI / dev; r=R * sin(zangle); for (int j = 0 ; j < dev + 1; j++) { yangle=j * PI * 2 / dev; point[i * dev + j][X] = r * sin(yangle); point[i * dev + j][Y] = r * cos(yangle); point[i * dev + j][Z] = R * cos(zangle); } } glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(INIT_WIDTH, INIT_HEIGHT); glutCreateWindow("3D CUDA Simulation"); glutDisplayFunc(display); glutReshapeFunc(resize); glutMouseFunc(mouse_button); glutMotionFunc(mouse_motion); setInitialPosition(); if (!initGL()) return 1; glutMainLoop(); hipFree(d_point); hipFree(dv_point); 
hipFree(dst_point); hipFree(de_point); hipFree(dT_point); hipFree(dJ_point); hipDeviceReset(); for (int i = 0 ; i < num_points ; i++) { free (point[i]); } free (point); return 0; }
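In grav_v above, each thread sums the gravitational pull of every other particle before handling collisions. Ignoring the unit-scale factors (scale, ANIM) specific to this code, the per-step velocity update it implements is the direct-summation N-body kick, in LaTeX:

\mathbf{v}_i \;\leftarrow\; \mathbf{v}_i \;+\; \sum_{j \neq i} \frac{G M \,(\mathbf{x}_j - \mathbf{x}_i)}{\lVert \mathbf{x}_j - \mathbf{x}_i \rVert^{3}} \,\Delta t

The kernel evaluates this as the magnitude G M / r^2 times the unit vector (x_j - x_i)/r, which is the same expression with r = ||x_j - x_i||.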
8ae5fff28321736aba12012b70cba2cbc7d65daf.cu
#include <stdio.h> #include <stdlib.h> #include <GL/gl.h> #include <GL/glut.h> #include <math.h> #include <stdbool.h> #include <omp.h> #include <cuda.h> #include <helper_cuda.h> #include <helper_functions.h> #include <cuda_runtime.h> #define PI 3.141592653589793 #define cap 1000 #define ref 1.0 #define temp 4273 #define visc 9 #define GRAV (6.674*0.00000000000000000001) #define density (2.5 * 1000000000000) #define rad 100 #define dev 12 #define M (4 / 3 * PI * rad*rad*rad* density) #define X 0 #define Y 1 #define Z 2 #define ANIM 1000000 #define scale 0.01 #define colmargin 1.1 #define R (rad * scale) #define INIT_WIDTH 800 #define INIT_HEIGHT 800 #define vision 20 #define Grid_x 1 #define Grid_y 1 #define Grid_z 1 #define Block_x 4 #define Block_y 2 #define Block_z 1 #define NUM_POINTS (Grid_x*Grid_y*Grid_z*Block_x*Block_y*Block_z) unsigned int num_points = (dev + 1) * (dev + 1); unsigned int window_width = 500; unsigned int window_height = 500; double init_left = -10000; double init_right = 10000; double init_bottom = -10000; double init_top = 10000; double left, right, bottom, top; float h_point[NUM_POINTS][3]; float v_point[NUM_POINTS][3]; float st_point[NUM_POINTS]; float e_point[NUM_POINTS]; float T_point[NUM_POINTS]; float J_point[NUM_POINTS]; float anim_time = ANIM; float anim_dt = 0.1; double phi = 30.0; double theta = 30.0; float light_pos[4]; int mouse_old_x, mouse_old_y; bool motion_p; double eye[3]; double center[3] = {0.0, 0.0, 0.0}; double up[3]; double ** point; float (*d_point)[3]; float (*dv_point)[3]; float (*dst_point); float (*de_point); float (*dT_point); float (*dJ_point); __global__ void grav_v(float (*pos)[3], float(*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt); __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt); texture<float,2> timetocol; texture<float,2> indextocol; // CUDAテクスチャのバインド. 
void bindTextures(float **coltime, float **colindex) { cudaChannelFormatDesc desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaBindTexture(0, timetocol, coltime, desc); cudaBindTexture(0, indextocol,colindex, desc); } // CUDAテクスチャのアンバインド void unbindTextures(void) { cudaUnbindTexture(timetocol); cudaUnbindTexture(indextocol); } //基本関数群 double dot(double vec0[], double vec1[]) { return(vec0[X] * vec1[X] + vec0[Y] * vec1[Y] + vec0[Z] * vec1[Z]); } void cross(double vec0[], double vec1[], double vec2[]) { vec2[X] = vec0[Y] * vec1[Z] - vec0[Z] * vec1[Y]; vec2[Y] = vec0[Z] * vec1[X] - vec0[X] * vec1[Z]; vec2[Z] = vec0[X] * vec1[Y] - vec0[Y] * vec1[X]; } void normVec(double vec[]) { double norm; norm = sqrt(vec[X] * vec[X] + vec[Y] * vec[Y] + vec[Z] * vec[Z]); vec[X] /= norm; vec[Y] /= norm; vec[Z] /= norm; } void normal(double p0[], double p1[], double p2[], double normal[]) { unsigned int i; double v0[3], v1[3]; for (i = 0; i < 3; i++) { v0[i] = p2[i] - p1[i]; v1[i] = p0[i] - p1[i]; } cross(v0, v1, normal); normVec(normal); } //重力影響後の速度を決定 __global__ void grav_v(float (*pos)[3],float(*vec)[3],float(*sti),float(*e),float(*T),float(*J), float time, float dt) { double xn,yn,zn,vx,vy,vz,dis,sq; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; double v_buff[3]={0}; int colnum=0; double gravity=0; __shared__ float coltime[NUM_POINTS][NUM_POINTS][2]; for (int i = 0 ; i < NUM_POINTS; i++) { coltime[index][i][0]=0; coltime[index][i][0]=0; } __syncthreads(); xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; v_buff[0]=vx; v_buff[1]=vy; v_buff[2]=vz; for (int i = 0 ; i < NUM_POINTS; i++) { sq = pow((double)(xn-pos[i][0]),2) + pow((double)(yn-pos[i][1]),2) + pow((double)(zn-pos[i][2]),2); gravity=GRAV*M/sq*scale*scale; dis = sqrt(sq); //衝突域侵入判定 if (dis > 2 * R * colmargin && i != index) { //他粒子へ与える運動エネルギーと内部エネルギーの交換 J[index]-=0.5*M*fabs((float)(pow((double)vec[i][0],2)+pow((double)vec[i][1],2)+pow((double)vec[i][2],2))-(float)(pow((double)(vec[i][0] + ((pos[i][0]-xn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][1] + ((pos[i][1]-yn)/dis)*gravity*ANIM),2)+pow((double)(vec[i][2] + ((pos[i][2]-zn)/dis)*gravity*ANIM),2))); //速度更新 vx = vx + ((pos[i][0]-xn)/dis)*gravity*ANIM*scale; vy = vy + ((pos[i][1]-yn)/dis)*gravity*ANIM*scale; vz = vz + ((pos[i][2]-zn)/dis)*gravity*ANIM*scale; } else { if (i != index && coltime[index][i][1] == 0){ //衝突域侵入からの経過の時間を記録 TBD 法線方向に直す coltime[index][i][1]=(double)i+1; coltime[i][index][1]=(double)index+1; } } } __syncthreads(); for (int i = 0 ; i < NUM_POINTS; i++) { if(coltime[index][i][1] > 0){ coltime[index][i][0]=(float)(2*R*colmargin - dis)/(pow((double)(vx-vec[i][0]),2)+pow((double)(vy-vec[i][1]),2)+pow((double)(vz-vec[i][2]),2)); colnum++; } } __syncthreads(); if(colnum>0) { //衝突域侵入からの経過時間をインデックス付きソート float tmp[2]={0}; for (int i = 0 ; i < NUM_POINTS; i++){ for(int j = i+1; j < NUM_POINTS; j++){ if(coltime[index][i][0] > coltime[index][j][0]){ tmp[0]=coltime[index][i][0]; tmp[1]=coltime[index][i][1]; coltime[index][i][0]=coltime[index][j][0]; coltime[index][i][1]=coltime[index][j][1]; coltime[index][j][0]=tmp[0]; coltime[index][j][1]=tmp[1]; } } } //衝突域侵入からの経過時間が長いものから処理 for (int i=NUM_POINTS-1 ; i>=NUM_POINTS-colnum; i--){ 
int colindex=coltime[index][i][1]-1; float repul=0; repul=e[index]; //反発係数は小さいほうを優先 if (e[colindex] < e[index]) { repul=e[colindex]; } //速度更新 v_buff[0]=(double)((1+repul)*M*vec[colindex][0]+(M-repul*M)*v_buff[0])/(M+M); v_buff[1]=(double)((1+repul)*M*vec[colindex][1]+(M-repul*M)*v_buff[1])/(M+M); v_buff[2]=(double)((1+repul)*M*vec[colindex][2]+(M-repul*M)*v_buff[2])/(M+M); //衝突エネルギーをm^2/3*stiの比で分配し熱エネルギー変換 //float Energy=0.5*(1-repul*repul)*(M*(pow((double)vx,2)+pow((double)vy,2)+pow((double)vz,2)) + M*(pow((double)vec[colindex][0],2)+pow((double)vec[colindex][1],2)+pow((double)vec[colindex][2],2))); //J[index]+=Energy * pow((double)M,0.667) * pow(10.0,(double)sti[index]) / (pow((double)M,0.667) * pow(10.0,(double)sti[index]) + pow((double)M,0.667) * pow(10.0,(double)sti[colindex])); //T[index]=(J[index]-0.5*M*(pow((double)v_buff[0],2)+pow((double)v_buff[1],2)+pow((double)v_buff[2],2)))/M/cap; //粘性と反発係数の更新 //e[index] = e[index] * exp((pow(10.0,(double)(visc-(T[index]-temp/100)))-pow(10.0,(double)sti[index]))/pow((double)M,0.5)); //sti[index] = visc - (T[index] - temp / 100); } } __syncthreads(); if (colnum>0) { vec[index][0] = (float)v_buff[0]; vec[index][1] = (float)v_buff[1]; vec[index][2] = (float)v_buff[2]; } else { vec[index][0] = (float)vx; vec[index][1] = (float)vy; vec[index][2] = (float)vz; } //内部エネルギーと運動エネルギーから熱エネルギー更新 //T[index]=(J[index]-0.5*M*(pow((double)vec[index][0],2)+pow((double)vec[index][1],2)+pow((double)vec[index][2],2)))/M/cap; //粘性と反発係数の更新 //e[index] = e[index] * exp((pow(10.0,(double)(visc-(T[index]-temp/100)))-pow(10.0,(double)sti[index]))/pow((double)M,0.5)); //sti[index] = visc - (T[index] - temp / 100); } //重力影響後の座標を決定 __global__ void grav_p(float (*pos)[3], float(*vec)[3] , float time, float dt) { double xn,yn,zn,vx,vy,vz; unsigned int thread_idx = threadIdx.x+blockDim.x*blockIdx.x; unsigned int thread_idy = threadIdx.y+blockDim.y*blockIdx.y; unsigned int thread_idz = threadIdx.z+blockDim.z*blockIdx.z; unsigned int index = (blockDim.x * Grid_x) * (blockDim.y * Grid_y) * thread_idz + (blockDim.x * Grid_x) * thread_idy + thread_idx ; xn = pos[index][0]; yn = pos[index][1]; zn = pos[index][2]; vx = vec[index][0]; vy = vec[index][1]; vz = vec[index][2]; pos[index][0] = xn + vx * dt; pos[index][1] = yn + vy * dt; pos[index][2] = zn + vz * dt; } // 粒子を初期位置に配置. 
void setInitialPosition(void) { srand(12131); for (int i = 0; i < NUM_POINTS; i++) { if(rand()%2==1) { h_point[i][0] = (double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][1] = (double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][2] = (double)rand() / RAND_MAX * INIT_WIDTH / 20; } else { h_point[i][0] = -(double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][1] = -(double)rand() / RAND_MAX * INIT_WIDTH / 20; h_point[i][2] = -(double)rand() / RAND_MAX * INIT_WIDTH / 20; } } for (int i = 0; i < NUM_POINTS; i++) { v_point[i][0] = 0; v_point[i][1] = 0; v_point[i][2] = 0; st_point[i]=visc; e_point[i]=ref; T_point[i]=temp; J_point[i]=cap*M*temp; } checkCudaErrors(cudaMalloc((void**)&d_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dv_point, 3 * NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dst_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&de_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dT_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dJ_point, NUM_POINTS * sizeof(float))); checkCudaErrors(cudaMemcpy(d_point, h_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dv_point, v_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dst_point, st_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(de_point, e_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dT_point, T_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dJ_point, J_point, NUM_POINTS * sizeof(float) , cudaMemcpyHostToDevice)); } //CUDA実行関数 void launchGPUKernel(unsigned int num_particles, float (*pos)[3], float (*vec)[3] ,float(*sti),float(*e),float(*T),float(*J), float time, float dt) { dim3 grid(Grid_x,Grid_y,Grid_z); dim3 block(Block_x,Block_y,Block_z); grav_v<<<grid , block>>>(pos, vec, sti, e, T, J, time, dt); grav_p<<<grid , block>>>(pos, vec, time, dt); } //アニメーション動作 void runGPUKernel(void) { launchGPUKernel(NUM_POINTS, d_point, dv_point ,dst_point, de_point,dT_point,dJ_point, anim_time, anim_dt); checkCudaErrors(cudaMemcpy(h_point, d_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(v_point, dv_point, 3 * NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(st_point, dst_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(e_point, de_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(T_point, dT_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(J_point, dJ_point, NUM_POINTS * sizeof(float) , cudaMemcpyDeviceToHost)); anim_time += anim_dt; } //ビュー定義 void defineViewMatrix(double phi, double theta) { unsigned int i; double c, s, xy_dist; double x_axis[3], y_axis[3], z_axis[3]; // 視点の設定. eye[Z] = sin(theta * PI / 180.0); xy_dist = cos(theta * PI / 180.0); c = cos(phi * PI / 180.0); s = sin(phi * PI / 180.0); eye[X] = xy_dist * c; eye[Y] = xy_dist * s; up[X] = - c * eye[Z]; up[Y] = - s * eye[Z]; up[Z] = s * eye[Y] + c * eye[X]; normVec(up); // 視点を原点とする座標系の定義. 
for (i = 0; i < 3; i++) { z_axis[i] = eye[i] - center[i]; } normVec(z_axis); cross(up, z_axis, x_axis); normVec(x_axis); cross(z_axis, x_axis, y_axis); gluLookAt(eye[X], eye[Y], eye[Z], center[X], center[Y], center[Z], up[X], up[Y], up[Z]); } void display(void) { double nrml_vec[3]; light_pos[0] = (float)eye[X]; light_pos[1] = (float)eye[Y]; light_pos[2] = (float)eye[Z]; light_pos[3] = 0.0f; //CUDA開始 runGPUKernel(); // 光源の設定 glLightfv(GL_LIGHT0, GL_POSITION, light_pos); //glLightfv(GL_LIGHT0, GL_DIFFUSE, light_pos); glEnable(GL_LIGHTING); glMatrixMode(GL_PROJECTION); glFrustum(-1000000, 1000000, -1000000, 1000000, -1000000, 1000000); glLoadIdentity(); glOrtho(-vision, vision, -vision, vision, -1000, 1000); glViewport(0, 0, window_width, window_height); defineViewMatrix(phi, theta); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBegin(GL_QUADS); //球体をポリゴンで作成 TBD メタボール for (int k = 0 ; k < NUM_POINTS ; k++) { for (int i = 0 ; i < dev + 1 ; i ++) { for (int j = 0 ; j < 2 * dev + 1 ; j++) { normal(point[i * (dev-1) + j],point[(i + 1) * (dev-1) + j + 1],point[(i+1) * (dev-1) + j],nrml_vec); glNormal3dv(nrml_vec); glVertex3d(point[i * (dev-1) + j][X] + h_point[k][X], point[i * (dev-1) + j][Y] + h_point[k][Y], point[i * (dev-1) + j][Z] + h_point[k][Z]); glVertex3d(point[(i + 1) * (dev-1) + j][X] + h_point[k][X],point[(i + 1) * (dev-1) + j][Y] + h_point[k][Y],point[(i + 1) * (dev-1) + j][Z] + h_point[k][Z]); glVertex3d(point[(i + 1) * (dev-1) + j + 1][X] + h_point[k][X], point[(i + 1) * (dev-1) + j + 1][Y] + h_point[k][Y], point[(i + 1) * (dev-1) + j + 1][Z] + h_point[k][Z]); glVertex3d(point[i * (dev-1) + j + 1][X] + h_point[k][X],point[i * (dev-1) + j + 1][Y] + h_point[k][Y],point[i * (dev-1) + j + 1][Z] + h_point[k][Z]); } } } glEnd(); glutSwapBuffers(); glutPostRedisplay(); } void mouse_button(int button, int state, int x, int y) { if ((state == GLUT_DOWN) && (button == GLUT_LEFT_BUTTON)) motion_p = true; else if (state == GLUT_UP) motion_p = false; mouse_old_x = x; mouse_old_y = y; } void mouse_motion(int x, int y) { int dx, dy; dx = x - mouse_old_x; dy = y - mouse_old_y; if (motion_p) { phi -= dx * 0.2; theta += dy * 0.2; } mouse_old_x = x; mouse_old_y = y; glutPostRedisplay(); } void resize (int width, int height) { window_width = width; window_height = height; } bool initGL(void) { glClearColor(0.0f, 0.0f , 0.0f, 0.5f); glEnable(GL_DEPTH_TEST); glClearDepth(1.0); glDepthFunc(GL_LESS); glEnable(GL_LIGHT0); return true; } int main(int argc, char** argv) { double yangle,zangle; double r; point = (double **)malloc(sizeof(double *) * num_points); for (int i = 0 ; i < num_points ; i++) { point[i] = (double *)malloc(sizeof(double) * 3); } for (int i = 0 ; i < dev + 1; i ++) { zangle = i * PI / dev; r=R * sin(zangle); for (int j = 0 ; j < dev + 1; j++) { yangle=j * PI * 2 / dev; point[i * dev + j][X] = r * sin(yangle); point[i * dev + j][Y] = r * cos(yangle); point[i * dev + j][Z] = R * cos(zangle); } } glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH); glutInitWindowSize(INIT_WIDTH, INIT_HEIGHT); glutCreateWindow("3D CUDA Simulation"); glutDisplayFunc(display); glutReshapeFunc(resize); glutMouseFunc(mouse_button); glutMotionFunc(mouse_motion); setInitialPosition(); if (!initGL()) return 1; glutMainLoop(); cudaFree(d_point); cudaFree(dv_point); cudaFree(dst_point); cudaFree(de_point); cudaFree(dT_point); cudaFree(dJ_point); cudaDeviceReset(); for (int i = 0 ; i < num_points ; i++) { free (point[i]); } free (point); return 0; }
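The componentwise velocity update in the collision branch of grav_v, ((1+repul)*M*vec[colindex] + (M - repul*M)*v_buff) / (M+M), is the one-dimensional collision law with coefficient of restitution specialized to equal masses; the kernel's TBD comment notes it should eventually be applied along the contact normal rather than per axis. In LaTeX, for masses m_1, m_2 and restitution e:

v_1' \;=\; \frac{m_1 v_1 + m_2 v_2 + m_2\, e\,(v_2 - v_1)}{m_1 + m_2}
\quad\Longrightarrow\quad
v_1' \;=\; \frac{(1 - e)\,v_1 + (1 + e)\,v_2}{2} \qquad (m_1 = m_2 = M)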
8faef9e4b2f1d1abaf300cf02ffebc8368c484a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Available optimizations (value should be used as the first parameter in the command line): 0 - Base -> no optimization 1 - Sham -> shared memory 2 - ZintReg -> for iteration on Z axis (Paulius) 3 - Zint -> for iteration on Z axis without using registers 4 - ShamZintReg -> shared memory + for iteration on Z axis 5 - ShamZint -> shared memory + for iteration on Z axis without registers 6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking 7 - Roc -> use of read only cache (__restrict__ and const modifiers) 8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers) 9 - RocZintReg -> for iteration on Z axis + read only cache 10 - RocZint -> for iteration on Z axis without registers + read only cache 11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking Known limitations: data grid size must be multiple of BLOCK_SIZE */ #include <stdio.h> //#define PRINT_GOLD //#define PRINT_RESULT #define BLOCK_DIMX 32 #define BLOCK_DIMY 16 #define BLOCK_DIMZ 1 #define RADIUS 6 // Half of the order #define PADDING_SIZE 32 // Error checking function #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ printf("ERROR: Failed to run stmt %s\n", #stmt); \ printf("ERROR: Got CUDA error ... %s\n", hipGetErrorString(err)); \ return -1; \ } \ } while(0) __constant__ float coeff[RADIUS*6+1]; /* Optimization Base: baseline code (no optimization) */ __global__ void calcStencilBase(float *a, float *b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * a[index] + coeff[1] * a[index - 6] + coeff[2] * a[index - 5] + coeff[3] * a[index - 4] + coeff[4] * a[index - 3] + coeff[5] * a[index - 2] + coeff[6] * a[index - 1] + coeff[7] * a[index + 1] + coeff[8] * a[index + 2] + coeff[9] * a[index + 3] + coeff[10] * a[index + 4] + coeff[11] * a[index + 5] + coeff[12] * a[index + 6] + coeff[13] * a[index - 6*pitchedDimx] + coeff[14] * a[index - 5*pitchedDimx] + coeff[15] * a[index - 4*pitchedDimx] + coeff[16] * a[index - 3*pitchedDimx] + coeff[17] * a[index - 2*pitchedDimx] + coeff[18] * a[index - pitchedDimx] + coeff[19] * a[index + pitchedDimx] + coeff[20] * a[index + 2*pitchedDimx] + coeff[21] * a[index + 3*pitchedDimx] + coeff[22] * a[index + 4*pitchedDimx] + coeff[23] * a[index + 5*pitchedDimx] + coeff[24] * a[index + 6*pitchedDimx] + coeff[25] * a[index - 6*stride] + coeff[26] * a[index - 5*stride] + coeff[27] * a[index - 4*stride] + coeff[28] * a[index - 3*stride] + coeff[29] * a[index - 2*stride] + coeff[30] * a[index - stride] + coeff[31] * a[index + stride] + coeff[32] * a[index + 2*stride] + coeff[33] * a[index + 3*stride] + coeff[34] * a[index + 4*stride] + coeff[35] * a[index + 5*stride] + coeff[36] * a[index + 6*stride]; } /* Optimization Sham: shared memory */ __global__ void calcStencilSham(float *a, float *b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int 
ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[index - (RADIUS*pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[index + (BLOCK_DIMY*pitchedDimx)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[index]; __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * a[index - 6*stride] + coeff[26] * a[index - 5*stride] + coeff[27] * a[index - 4*stride] + coeff[28] * a[index - 3*stride] + coeff[29] * a[index - 2*stride] + coeff[30] * a[index - stride] + coeff[31] * a[index + stride] + coeff[32] * a[index + 2*stride] + coeff[33] * a[index + 3*stride] + coeff[34] * a[index + 4*stride] + coeff[35] * a[index + 5*stride] + coeff[36] * a[index + 6*stride]; } /* Optimization ZintReg: for iteration on Z axis with registers */ __global__ void calcStencilZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5, infront6; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5, behind6; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind6 will be loaded inside the next 'for') behind5 = a[in_index]; in_index += stride; behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += 
stride; infront6 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind6 = behind5; behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = infront6; infront6 = a[in_index]; in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * a[out_index - 6] + coeff[2] * a[out_index - 5] + coeff[3] * a[out_index - 4] + coeff[4] * a[out_index - 3] + coeff[5] * a[out_index - 2] + coeff[6] * a[out_index - 1] + coeff[7] * a[out_index + 1] + coeff[8] * a[out_index + 2] + coeff[9] * a[out_index + 3] + coeff[10] * a[out_index + 4] + coeff[11] * a[out_index + 5] + coeff[12] * a[out_index + 6] + coeff[13] * a[out_index - 6*pitchedDimx] + coeff[14] * a[out_index - 5*pitchedDimx] + coeff[15] * a[out_index - 4*pitchedDimx] + coeff[16] * a[out_index - 3*pitchedDimx] + coeff[17] * a[out_index - 2*pitchedDimx] + coeff[18] * a[out_index - pitchedDimx] + coeff[19] * a[out_index + pitchedDimx] + coeff[20] * a[out_index + 2*pitchedDimx] + coeff[21] * a[out_index + 3*pitchedDimx] + coeff[22] * a[out_index + 4*pitchedDimx] + coeff[23] * a[out_index + 5*pitchedDimx] + coeff[24] * a[out_index + 6*pitchedDimx] + coeff[25] * behind6 + coeff[26] * behind5 + coeff[27] * behind4 + coeff[28] * behind3 + coeff[29] * behind2 + coeff[30] * behind1 + coeff[31] * infront1 + coeff[32] * infront2 + coeff[33] * infront3 + coeff[34] * infront4 + coeff[35] * infront5 + coeff[36] * infront6; } } /* Optimization Zint: for iteration on Z axis without using registers */ __global__ void calcStencilZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 5*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * a[out_index] + coeff[1] * a[out_index - 6] + coeff[2] * a[out_index - 5] + coeff[3] * a[out_index - 4] + coeff[4] * a[out_index - 3] + coeff[5] * a[out_index - 2] + coeff[6] * a[out_index - 1] + coeff[7] * a[out_index + 1] + coeff[8] * a[out_index + 2] + coeff[9] * a[out_index + 3] + coeff[10] * a[out_index + 4] + coeff[11] * a[out_index + 5] + coeff[12] * a[out_index + 6] + coeff[13] * a[out_index - 6*pitchedDimx] + coeff[14] * a[out_index - 5*pitchedDimx] + coeff[15] * a[out_index - 4*pitchedDimx] + coeff[16] * a[out_index - 3*pitchedDimx] + coeff[17] * a[out_index - 2*pitchedDimx] + coeff[18] * a[out_index - pitchedDimx] + coeff[19] * a[out_index + pitchedDimx] + coeff[20] * a[out_index + 2*pitchedDimx] + coeff[21] * a[out_index + 3*pitchedDimx] + coeff[22] * a[out_index + 4*pitchedDimx] + coeff[23] * a[out_index + 5*pitchedDimx] + coeff[24] * a[out_index + 6*pitchedDimx] + coeff[25] * a[out_index - 6*stride] + coeff[26] * a[out_index - 5*stride] + coeff[27] * a[out_index - 4*stride] + coeff[28] * a[out_index - 3*stride] + coeff[29] * a[out_index - 2*stride] + coeff[30] * a[out_index - stride] + coeff[31] * a[out_index + stride] + coeff[32] * a[out_index + 2*stride] + coeff[33] * a[out_index + 3*stride] + coeff[34] * a[out_index + 4*stride] + coeff[35] * a[out_index + 5*stride] + 
coeff[36] * a[out_index + 6*stride]; } } /* Optimization ShamZintReg: for iteration on Z axis + use of shared memory */ __global__ void calcStencilShamZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5, infront6; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5, behind6; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind6 will be loaded inside the next 'for') behind5 = a[in_index]; in_index += stride; behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; infront6 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind6 = behind5; behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = infront6; infront6 = a[in_index]; in_index += stride; out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = current; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * 
behind6 + coeff[26] * behind5 + coeff[27] * behind4 + coeff[28] * behind3 + coeff[29] * behind2 + coeff[30] * behind1 + coeff[31] * infront1 + coeff[32] * infront2 + coeff[33] * infront3 + coeff[34] * infront4 + coeff[35] * infront5 + coeff[36] * infront6; } } /* Optimization ShamZint: for iteration on Z axis without registers + use of shared memory */ __global__ void calcStencilShamZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 5*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[out_index]; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * a[out_index - 6*stride] + coeff[26] * a[out_index - 5*stride] + coeff[27] * a[out_index - 4*stride] + coeff[28] * a[out_index - 3*stride] + coeff[29] * a[out_index - 2*stride] + coeff[30] * a[out_index - stride] + coeff[31] * a[out_index + stride] + coeff[32] * a[out_index + 2*stride] + coeff[33] * a[out_index + 3*stride] + coeff[34] * a[out_index + 4*stride] + coeff[35] * a[out_index + 5*stride] + coeff[36] * a[out_index + 6*stride]; } } /* Optimization ShamZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamZintTempReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS][2]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 
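/* Note: with temporal blocking each sweep advances two time steps, so the Y
   extent carries 2*RADIUS ghost rows on each side (RADIUS per side per time
   step) and each block only produces (BLOCK_DIMX-2*RADIUS) x
   (BLOCK_DIMY-2*RADIUS) interior points, as the row/col index computation
   above reflects. */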
4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind6 = a[in_index]; // Z = -R = -6 in_index += stride; t0_behind5 = a[in_index]; // Z = -R+1 = -5 in_index += stride; t0_behind4 = a[in_index]; // Z = -R+2 = -4 in_index += stride; t0_behind3 = a[in_index]; // Z = -R+3 = -3 in_index += stride; t0_behind2 = a[in_index]; // Z = -R+4 = -2 in_index += stride; t0_behind1 = a[in_index]; // Z = -R+5 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = a[in_index]; // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = a[in_index]; // Z = 1 in_index += stride; t0_infront2 = a[in_index]; // Z = 2 in_index += stride; t0_infront3 = a[in_index]; // Z = 3 in_index += stride; t0_infront4 = a[in_index]; // Z = 4 in_index += 
stride; t0_infront5 = a[in_index]; // Z = 5 in_index += stride; t0_infront6 = a[in_index]; // Z = R = 6 in_index += stride; // Load Z = 0 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[out_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind6 = t0_behind6; t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; // Load Z = 1 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * 
ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront1 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 2 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * 
t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront2 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 3 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront3 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 4 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < 
(pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront4 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 5 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * 
t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront5 = t0_current; } __syncthreads(); for (int i = 0; i < dimz; i++) { // Load Z = (2R+i) to registers t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = R+i to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront6 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront6 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[ty][sharedTx][0] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[ty][sharedTx - 6][0] + coeff[2] * ds_a[ty][sharedTx - 5][0] + coeff[3] * ds_a[ty][sharedTx - 4][0] + coeff[4] * ds_a[ty][sharedTx - 3][0] + coeff[5] * ds_a[ty][sharedTx - 2][0] + coeff[6] * ds_a[ty][sharedTx - 1][0] + coeff[7] * ds_a[ty][sharedTx + 1][0] + coeff[8] * ds_a[ty][sharedTx + 2][0] + coeff[9] * ds_a[ty][sharedTx + 3][0] + coeff[10] * ds_a[ty][sharedTx + 4][0] + coeff[11] * ds_a[ty][sharedTx + 5][0] + coeff[12] * ds_a[ty][sharedTx + 6][0] + 
coeff[13] * ds_a[ty - 6][sharedTx][0] + coeff[14] * ds_a[ty - 5][sharedTx][0] + coeff[15] * ds_a[ty - 4][sharedTx][0] + coeff[16] * ds_a[ty - 3][sharedTx][0] + coeff[17] * ds_a[ty - 2][sharedTx][0] + coeff[18] * ds_a[ty - 1][sharedTx][0] + coeff[19] * ds_a[ty + 1][sharedTx][0] + coeff[20] * ds_a[ty + 2][sharedTx][0] + coeff[21] * ds_a[ty + 3][sharedTx][0] + coeff[22] * ds_a[ty + 4][sharedTx][0] + coeff[23] * ds_a[ty + 5][sharedTx][0] + coeff[24] * ds_a[ty + 6][sharedTx][0] + coeff[25] * t1_behind6 + coeff[26] * t1_behind5 + coeff[27] * t1_behind4 + coeff[28] * t1_behind3 + coeff[29] * t1_behind2 + coeff[30] * t1_behind1 + coeff[31] * t1_infront1 + coeff[32] * t1_infront2 + coeff[33] * t1_infront3 + coeff[34] * t1_infront4 + coeff[35] * t1_infront5 + coeff[36] * t1_infront6; } out_index += stride; t1_behind6 = t1_behind5; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; t1_infront5 = t1_infront6; } } /* Optimization Roc: use of read only cache (texture memory) */ __global__ void calcStencilRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * __ldg(&a[index]) + coeff[1] * __ldg(&a[index - 6]) + coeff[2] * __ldg(&a[index - 5]) + coeff[3] * __ldg(&a[index - 4]) + coeff[4] * __ldg(&a[index - 3]) + coeff[5] * __ldg(&a[index - 2]) + coeff[6] * __ldg(&a[index - 1]) + coeff[7] * __ldg(&a[index + 1]) + coeff[8] * __ldg(&a[index + 2]) + coeff[9] * __ldg(&a[index + 3]) + coeff[10] * __ldg(&a[index + 4]) + coeff[11] * __ldg(&a[index + 5]) + coeff[12] * __ldg(&a[index + 6]) + coeff[13] * __ldg(&a[index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[index - pitchedDimx]) + coeff[19] * __ldg(&a[index + pitchedDimx]) + coeff[20] * __ldg(&a[index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[index + 6*pitchedDimx]) + coeff[25] * __ldg(&a[index - 6*stride]) + coeff[26] * __ldg(&a[index - 5*stride]) + coeff[27] * __ldg(&a[index - 4*stride]) + coeff[28] * __ldg(&a[index - 3*stride]) + coeff[29] * __ldg(&a[index - 2*stride]) + coeff[30] * __ldg(&a[index - stride]) + coeff[31] * __ldg(&a[index + stride]) + coeff[32] * __ldg(&a[index + 2*stride]) + coeff[33] * __ldg(&a[index + 3*stride]) + coeff[34] * __ldg(&a[index + 4*stride]) + coeff[35] * __ldg(&a[index + 5*stride]) + coeff[36] * __ldg(&a[index + 6*stride]); } /* Optimization ShamRoc: use of shared memory + read only cache (texture memory) */ __global__ void calcStencilShamRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for 
shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = __ldg(&a[index - (RADIUS*pitchedDimx)]); ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = __ldg(&a[index + (BLOCK_DIMY*pitchedDimx)]); } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = __ldg(&a[index - RADIUS]); ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = __ldg(&a[index + BLOCK_DIMX]); } // Load current position to shared memory ds_a[ty][sharedTx] = __ldg(&a[index]); __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * __ldg(&a[index - 6*stride]) + coeff[26] * __ldg(&a[index - 5*stride]) + coeff[27] * __ldg(&a[index - 4*stride]) + coeff[28] * __ldg(&a[index - 3*stride]) + coeff[29] * __ldg(&a[index - 2*stride]) + coeff[30] * __ldg(&a[index - stride]) + coeff[31] * __ldg(&a[index + stride]) + coeff[32] * __ldg(&a[index + 2*stride]) + coeff[33] * __ldg(&a[index + 3*stride]) + coeff[34] * __ldg(&a[index + 4*stride]) + coeff[35] * __ldg(&a[index + 5*stride]) + coeff[36] * __ldg(&a[index + 6*stride]); } /* Optimization RocZintReg: use of iteration on Z axis + read only cache (texture memory) */ __global__ void calcStencilRocZintReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5, infront6; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5, behind6; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind6 will be loaded inside the next 'for') behind5 = __ldg(&a[in_index]); in_index += stride; behind4 = __ldg(&a[in_index]); in_index += stride; behind3 = __ldg(&a[in_index]); in_index += stride; behind2 = __ldg(&a[in_index]); in_index += stride; behind1 = __ldg(&a[in_index]); in_index += stride; current = 
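/* Pipeline prologue: planes Z = -5 .. Z = +6 are preloaded through the
   read-only cache before the main loop; behind6 receives its first value from
   the initial rotation inside the loop, mirroring the plain ZintReg kernel. */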
__ldg(&a[in_index]); out_index = in_index; in_index += stride; infront1 = __ldg(&a[in_index]); in_index += stride; infront2 = __ldg(&a[in_index]); in_index += stride; infront3 = __ldg(&a[in_index]); in_index += stride; infront4 = __ldg(&a[in_index]); in_index += stride; infront5 = __ldg(&a[in_index]); in_index += stride; infront6 = __ldg(&a[in_index]); in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind6 = behind5; behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = infront6; infront6 = __ldg(&a[in_index]); in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * __ldg(&a[out_index - 6]) + coeff[2] * __ldg(&a[out_index - 5]) + coeff[3] * __ldg(&a[out_index - 4]) + coeff[4] * __ldg(&a[out_index - 3]) + coeff[5] * __ldg(&a[out_index - 2]) + coeff[6] * __ldg(&a[out_index - 1]) + coeff[7] * __ldg(&a[out_index + 1]) + coeff[8] * __ldg(&a[out_index + 2]) + coeff[9] * __ldg(&a[out_index + 3]) + coeff[10] * __ldg(&a[out_index + 4]) + coeff[11] * __ldg(&a[out_index + 5]) + coeff[12] * __ldg(&a[out_index + 6]) + coeff[13] * __ldg(&a[out_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index - pitchedDimx]) + coeff[19] * __ldg(&a[out_index + pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[out_index + 6*pitchedDimx]) + coeff[25] * behind6 + coeff[26] * behind5 + coeff[27] * behind4 + coeff[28] * behind3 + coeff[29] * behind2 + coeff[30] * behind1 + coeff[31] * infront1 + coeff[32] * infront2 + coeff[33] * infront3 + coeff[34] * infront4 + coeff[35] * infront5 + coeff[36] * infront6; } } /* Optimization RocZint: use of iteration on Z axis without registers + read only cache (texture memory) */ __global__ void calcStencilRocZint(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for reading Z values out_index += 5*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * __ldg(&a[out_index]) + coeff[1] * __ldg(&a[out_index - 6]) + coeff[2] * __ldg(&a[out_index - 5]) + coeff[3] * __ldg(&a[out_index - 4]) + coeff[4] * __ldg(&a[out_index - 3]) + coeff[5] * __ldg(&a[out_index - 2]) + coeff[6] * __ldg(&a[out_index - 1]) + coeff[7] * __ldg(&a[out_index + 1]) + coeff[8] * __ldg(&a[out_index + 2]) + coeff[9] * __ldg(&a[out_index + 3]) + coeff[10] * __ldg(&a[out_index + 4]) + coeff[11] * __ldg(&a[out_index + 5]) + coeff[12] * __ldg(&a[out_index + 6]) + coeff[13] * __ldg(&a[out_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[out_index 
- 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index - pitchedDimx]) + coeff[19] * __ldg(&a[out_index + pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[out_index + 6*pitchedDimx]) + coeff[25] * __ldg(&a[out_index - 6*stride]) + coeff[26] * __ldg(&a[out_index - 5*stride]) + coeff[27] * __ldg(&a[out_index - 4*stride]) + coeff[28] * __ldg(&a[out_index - 3*stride]) + coeff[29] * __ldg(&a[out_index - 2*stride]) + coeff[30] * __ldg(&a[out_index - stride]) + coeff[31] * __ldg(&a[out_index + stride]) + coeff[32] * __ldg(&a[out_index + 2*stride]) + coeff[33] * __ldg(&a[out_index + 3*stride]) + coeff[34] * __ldg(&a[out_index + 4*stride]) + coeff[35] * __ldg(&a[out_index + 5*stride]) + coeff[36] * __ldg(&a[out_index + 6*stride]); } } /* Optimization ShamRocZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamRocZintTempReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY][BLOCK_DIMX]; int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead 
(in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind6 = __ldg(&a[in_index]); // Z = -R = -6 in_index += stride; t0_behind5 = __ldg(&a[in_index]); // Z = -R+1 = -5 in_index += stride; t0_behind4 = __ldg(&a[in_index]); // Z = -R+2 = -4 in_index += stride; t0_behind3 = __ldg(&a[in_index]); // Z = -R+3 = -3 in_index += stride; t0_behind2 = __ldg(&a[in_index]); // Z = -R+4 = -2 in_index += stride; t0_behind1 = __ldg(&a[in_index]); // Z = -R+5 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = __ldg(&a[in_index]); // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = __ldg(&a[in_index]); // Z = 1 in_index += stride; t0_infront2 = __ldg(&a[in_index]); // Z = 2 in_index += stride; t0_infront3 = __ldg(&a[in_index]); // Z = 3 in_index += stride; t0_infront4 = __ldg(&a[in_index]); // Z = 4 in_index += stride; t0_infront5 = __ldg(&a[in_index]); // Z = 5 in_index += stride; t0_infront6 = __ldg(&a[in_index]); // Z = R = 6 in_index += stride; // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * __ldg(&a[out_index - 6]) + coeff[2] * __ldg(&a[out_index - 5]) + coeff[3] * __ldg(&a[out_index - 4]) + coeff[4] * __ldg(&a[out_index - 3]) + coeff[5] * __ldg(&a[out_index - 2]) + coeff[6] * __ldg(&a[out_index - 1]) + coeff[7] * __ldg(&a[out_index + 1]) + coeff[8] * __ldg(&a[out_index + 2]) + coeff[9] * __ldg(&a[out_index + 3]) + coeff[10] * __ldg(&a[out_index + 4]) + coeff[11] * __ldg(&a[out_index + 5]) + coeff[12] * __ldg(&a[out_index + 6]) + coeff[13] * __ldg(&a[out_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index - pitchedDimx]) + coeff[19] * __ldg(&a[out_index + pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[out_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind6 = t0_behind6; t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = 
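/* The t+1 'behind' registers are seeded from the t0 ghost planes; the
   reference initialization keeps those planes at 0.0 and never updates them,
   so carrying them over unchanged matches the host gold computation. */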
t0_behind1; t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront1 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + 
coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront2 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront3 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * 
__ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront4 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 5 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront5 = t0_current; } for (int i = 0; i < dimz-(4*RADIUS); i++) { // Load Z = (2R+i) to registers t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; 
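/* Advance the t0 register pipeline one plane along Z: every value shifts one
   slot towards 'behind', and the next input plane (Z = 2*RADIUS + i) is
   fetched just below through the read-only cache. */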
t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront6 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront6 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[threadIdx.y][threadIdx.x] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[threadIdx.y][threadIdx.x - 6] + coeff[2] * ds_a[threadIdx.y][threadIdx.x - 5] + coeff[3] * ds_a[threadIdx.y][threadIdx.x - 4] + coeff[4] * ds_a[threadIdx.y][threadIdx.x - 3] + coeff[5] * ds_a[threadIdx.y][threadIdx.x - 2] + coeff[6] * ds_a[threadIdx.y][threadIdx.x - 1] + coeff[7] * ds_a[threadIdx.y][threadIdx.x + 1] + coeff[8] * ds_a[threadIdx.y][threadIdx.x + 2] + coeff[9] * ds_a[threadIdx.y][threadIdx.x + 3] + coeff[10] * ds_a[threadIdx.y][threadIdx.x + 4] + coeff[11] * ds_a[threadIdx.y][threadIdx.x + 5] + coeff[12] * ds_a[threadIdx.y][threadIdx.x + 6] + coeff[13] * ds_a[threadIdx.y - 6][threadIdx.x] + coeff[14] * ds_a[threadIdx.y - 5][threadIdx.x] + coeff[15] * ds_a[threadIdx.y - 4][threadIdx.x] + coeff[16] * ds_a[threadIdx.y - 3][threadIdx.x] + coeff[17] * ds_a[threadIdx.y - 2][threadIdx.x] + coeff[18] * ds_a[threadIdx.y - 1][threadIdx.x] + coeff[19] * ds_a[threadIdx.y + 1][threadIdx.x] + coeff[20] * ds_a[threadIdx.y + 2][threadIdx.x] + coeff[21] * ds_a[threadIdx.y + 3][threadIdx.x] + coeff[22] * ds_a[threadIdx.y + 4][threadIdx.x] + coeff[23] * ds_a[threadIdx.y + 5][threadIdx.x] + coeff[24] * ds_a[threadIdx.y + 6][threadIdx.x] + coeff[25] * t1_behind6 + coeff[26] * t1_behind5 + coeff[27] * t1_behind4 + coeff[28] * t1_behind3 + coeff[29] * t1_behind2 + coeff[30] * t1_behind1 + coeff[31] * t1_infront1 + coeff[32] * t1_infront2 + coeff[33] * t1_infront3 + coeff[34] * t1_infront4 + coeff[35] * 
t1_infront5 + coeff[36] * t1_infront6; } out_index += stride; t1_behind6 = t1_behind5; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; t1_infront5 = t1_infront6; } } void initGold(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+2*RADIUS); int index = 0; for (int i = 0; i < (dimz+2*RADIUS); i++) { for (int j = 0; j < (dimy+2*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (i<RADIUS || j<RADIUS || i>=dimz+RADIUS || j>=dimy+RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void initGoldTemporal(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+4*RADIUS); int index = 0; for (int i = 0; i < (dimz+4*RADIUS); i++) { for (int j = 0; j < (dimy+4*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if ( i<2*RADIUS || j<2*RADIUS || i>=dimz+2*RADIUS || j>=dimy+2*RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE ) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void hostStencil(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * (dimy+2*RADIUS); b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float)); initGold(b, dimx, dimy, dimz, pitchedDimx); int index = 0; for (int t = 0; t < t_end; t++) { for (int i = RADIUS; i < dimz+RADIUS; i++) { for (int j = RADIUS; j < dimy+RADIUS; j++) { for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; if (t%2) { a[index] = hcoeff[0] * b[index] + hcoeff[1] * b[index - 6] + hcoeff[2] * b[index - 5] + hcoeff[3] * b[index - 4] + hcoeff[4] * b[index - 3] + hcoeff[5] * b[index - 2] + hcoeff[6] * b[index - 1] + hcoeff[7] * b[index + 1] + hcoeff[8] * b[index + 2] + hcoeff[9] * b[index + 3] + hcoeff[10] * b[index + 4] + hcoeff[11] * b[index + 5] + hcoeff[12] * b[index + 6] + hcoeff[13] * b[index - 6*pitchedDimx] + hcoeff[14] * b[index - 5*pitchedDimx] + hcoeff[15] * b[index - 4*pitchedDimx] + hcoeff[16] * b[index - 3*pitchedDimx] + hcoeff[17] * b[index - 2*pitchedDimx] + hcoeff[18] * b[index - pitchedDimx] + hcoeff[19] * b[index + pitchedDimx] + hcoeff[20] * b[index + 2*pitchedDimx] + hcoeff[21] * b[index + 3*pitchedDimx] + hcoeff[22] * b[index + 4*pitchedDimx] + hcoeff[23] * b[index + 5*pitchedDimx] + hcoeff[24] * b[index + 6*pitchedDimx] + hcoeff[25] * b[index - 6*stride] + hcoeff[26] * b[index - 5*stride] + hcoeff[27] * b[index - 4*stride] + hcoeff[28] * b[index - 3*stride] + hcoeff[29] * b[index - 2*stride] + hcoeff[30] * b[index - stride] + hcoeff[31] * b[index + stride] + hcoeff[32] * b[index + 2*stride] + hcoeff[33] * b[index + 3*stride] + hcoeff[34] * b[index + 4*stride] + hcoeff[35] * b[index + 5*stride] + hcoeff[36] * b[index + 6*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 6] + hcoeff[2] * a[index - 5] + hcoeff[3] * a[index - 4] + hcoeff[4] * a[index - 3] + hcoeff[5] * a[index - 2] + hcoeff[6] * a[index - 1] + hcoeff[7] * a[index + 1] + hcoeff[8] * a[index + 2] + hcoeff[9] * a[index + 3] + hcoeff[10] * a[index + 4] + hcoeff[11] * a[index + 5] + hcoeff[12] * a[index + 6] + hcoeff[13] * a[index - 6*pitchedDimx] + hcoeff[14] * a[index - 5*pitchedDimx] + hcoeff[15] * a[index - 4*pitchedDimx] + 
hcoeff[16] * a[index - 3*pitchedDimx] + hcoeff[17] * a[index - 2*pitchedDimx] + hcoeff[18] * a[index - pitchedDimx] + hcoeff[19] * a[index + pitchedDimx] + hcoeff[20] * a[index + 2*pitchedDimx] + hcoeff[21] * a[index + 3*pitchedDimx] + hcoeff[22] * a[index + 4*pitchedDimx] + hcoeff[23] * a[index + 5*pitchedDimx] + hcoeff[24] * a[index + 6*pitchedDimx] + hcoeff[25] * a[index - 6*stride] + hcoeff[26] * a[index - 5*stride] + hcoeff[27] * a[index - 4*stride] + hcoeff[28] * a[index - 3*stride] + hcoeff[29] * a[index - 2*stride] + hcoeff[30] * a[index - stride] + hcoeff[31] * a[index + stride] + hcoeff[32] * a[index + 2*stride] + hcoeff[33] * a[index + 3*stride] + hcoeff[34] * a[index + 4*stride] + hcoeff[35] * a[index + 5*stride] + hcoeff[36] * a[index + 6*stride]; } } } } } if (t_end%2) { for (int i = RADIUS; i < dimz+RADIUS; i++) { for (int j = RADIUS; j < dimy+RADIUS; j++) { for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void hostStencilTemporal(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * (dimy+4*RADIUS); b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float)); initGoldTemporal(b, dimx, dimy, dimz, pitchedDimx); int index = 0; for (int t = 0; t < t_end; t++) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; if (t%2) { a[index] = hcoeff[0] * b[index] + hcoeff[1] * b[index - 6] + hcoeff[2] * b[index - 5] + hcoeff[3] * b[index - 4] + hcoeff[4] * b[index - 3] + hcoeff[5] * b[index - 2] + hcoeff[6] * b[index - 1] + hcoeff[7] * b[index + 1] + hcoeff[8] * b[index + 2] + hcoeff[9] * b[index + 3] + hcoeff[10] * b[index + 4] + hcoeff[11] * b[index + 5] + hcoeff[12] * b[index + 6] + hcoeff[13] * b[index - 6*pitchedDimx] + hcoeff[14] * b[index - 5*pitchedDimx] + hcoeff[15] * b[index - 4*pitchedDimx] + hcoeff[16] * b[index - 3*pitchedDimx] + hcoeff[17] * b[index - 2*pitchedDimx] + hcoeff[18] * b[index - pitchedDimx] + hcoeff[19] * b[index + pitchedDimx] + hcoeff[20] * b[index + 2*pitchedDimx] + hcoeff[21] * b[index + 3*pitchedDimx] + hcoeff[22] * b[index + 4*pitchedDimx] + hcoeff[23] * b[index + 5*pitchedDimx] + hcoeff[24] * b[index + 6*pitchedDimx] + hcoeff[25] * b[index - 6*stride] + hcoeff[26] * b[index - 5*stride] + hcoeff[27] * b[index - 4*stride] + hcoeff[28] * b[index - 3*stride] + hcoeff[29] * b[index - 2*stride] + hcoeff[30] * b[index - stride] + hcoeff[31] * b[index + stride] + hcoeff[32] * b[index + 2*stride] + hcoeff[33] * b[index + 3*stride] + hcoeff[34] * b[index + 4*stride] + hcoeff[35] * b[index + 5*stride] + hcoeff[36] * b[index + 6*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 6] + hcoeff[2] * a[index - 5] + hcoeff[3] * a[index - 4] + hcoeff[4] * a[index - 3] + hcoeff[5] * a[index - 2] + hcoeff[6] * a[index - 1] + hcoeff[7] * a[index + 1] + hcoeff[8] * a[index + 2] + hcoeff[9] * a[index + 3] + hcoeff[10] * a[index + 4] + hcoeff[11] * a[index + 5] + hcoeff[12] * a[index + 6] + hcoeff[13] * a[index - 6*pitchedDimx] + hcoeff[14] * a[index - 5*pitchedDimx] + hcoeff[15] * a[index - 4*pitchedDimx] + hcoeff[16] * a[index - 3*pitchedDimx] + hcoeff[17] * a[index - 2*pitchedDimx] + hcoeff[18] * a[index - pitchedDimx] + hcoeff[19] * a[index + pitchedDimx] + hcoeff[20] * a[index + 2*pitchedDimx] + hcoeff[21] * a[index + 3*pitchedDimx] + hcoeff[22] * 
a[index + 4*pitchedDimx] + hcoeff[23] * a[index + 5*pitchedDimx] + hcoeff[24] * a[index + 6*pitchedDimx] + hcoeff[25] * a[index - 6*stride] + hcoeff[26] * a[index - 5*stride] + hcoeff[27] * a[index - 4*stride] + hcoeff[28] * a[index - 3*stride] + hcoeff[29] * a[index - 2*stride] + hcoeff[30] * a[index - stride] + hcoeff[31] * a[index + stride] + hcoeff[32] * a[index + 2*stride] + hcoeff[33] * a[index + 3*stride] + hcoeff[34] * a[index + 4*stride] + hcoeff[35] * a[index + 5*stride] + hcoeff[36] * a[index + 6*stride]; } } } } } if (t_end%2) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void printMatrix(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i=0; i < dimz+2*RADIUS; i++) { for (int j=0; j < dimy+2*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } void printMatrixTemporal(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i=0; i < dimz+4*RADIUS; i++) { for (int j=0; j < dimy+4*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } bool checkResult(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i = 0; i < dimz+2*RADIUS; i++) { for (int j = 0; j < dimy+2*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } bool checkResultTemporal(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i = 0; i < dimz+4*RADIUS; i++) { for (int j = 0; j < dimy+4*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } int main(int argc, char* argv[]) { float *h_a, *h_gold_a; float *d_a, *d_b; float hcoeff[RADIUS*6+1] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; hipEvent_t t0, t1, t2, t3, t4, t5; float init, host_comp, host2gpu, gpu2host, gpu_comp, tot; int dimx, dimy, dimz, t_end; long points, flop; float gFlops; int opt; // Variable to select the optimization char vbs = 0; if (argc == 7) { vbs = 1; } else { if (argc != 6) { printf("use: <exec> <OPT> <DIMX> <DIMY> <DIMZ> <T_END> <VBS(1)>\n" "Available optimizations (value should be used as the first parameter in the command line):\n" "0 - Base -> no optimization\n" "1 - Sham -> shared memory\n" "2 - ZintReg -> for iteration on Z axis (Paulius)\n" "3 - Zint -> for iteration on Z axis without using registers\n" "4 - ShamZintReg -> shared memory + for iteration on Z axis\n" "5 - ShamZint -> shared memory + for iteration on Z axis without registers\n" "6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking\n" "7 - Roc -> use of read only cache (__restrict__ and const modifiers)\n" "8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const 
modifiers)\n" "9 - RocZintReg -> for iteration on Z axis + read only cache\n" "10 - RocZint -> for iteration on Z axis without registers + read only cache\n" "11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking\n" ); exit(-1); } } opt = atoi(argv[1]); dimx = atoi(argv[2]); dimy = atoi(argv[3]); dimz = atoi(argv[4]); t_end = atoi(argv[5]); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); hipEventCreate(&t0); hipEventCreate(&t1); hipEventCreate(&t2); hipEventCreate(&t3); hipEventCreate(&t4); hipEventCreate(&t5); int pitchedDimx = dimx + 2*PADDING_SIZE; int gold_size; // If temporal blocking is requested, allocate more device memory if ( (opt == 6) || (opt == 11) ) { gold_size = pitchedDimx * (dimy+4*RADIUS) * (dimz+4*RADIUS) * sizeof(float); // Check if the number of iterations is even if ( (t_end%2) != 0) { if (vbs == 0) printf("Number of time iterations is odd, adding one iteration!\n"); t_end++; } } else { gold_size = pitchedDimx * (dimy+2*RADIUS) * (dimz+2*RADIUS) * sizeof(float); } points = (long)dimx * (long)dimy * (long)dimz * (long)t_end; flop = (long)((6*RADIUS) + ((6*RADIUS)+1)) * points; // 36 adds, 37 multiplies hipEventRecord(t0); /* allocate device variables */ wbCheck(hipMalloc((void**) &d_a, gold_size)); wbCheck(hipMalloc((void**) &d_b, gold_size)); /* allocate host variables */ h_a = (float *)malloc(gold_size); h_gold_a = (float *)malloc(gold_size); if ( (opt == 6) || (opt == 11) ) { initGoldTemporal(h_a, dimx, dimy, dimz, pitchedDimx); initGoldTemporal(h_gold_a, dimx, dimy, dimz, pitchedDimx); } else { initGold(h_a, dimx, dimy, dimz, pitchedDimx); initGold(h_gold_a, dimx, dimy, dimz, pitchedDimx); } hipEventRecord(t1); if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { hostStencilTemporal(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } else { hostStencil(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } } #ifdef PRINT_GOLD if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_gold_a, pitchedDimx, dimy, dimz); } else { printMatrix(h_gold_a, pitchedDimx, dimy, dimz); } #endif hipEventRecord(t2); wbCheck(hipMemcpyToSymbol(coeff, hcoeff, sizeof(hcoeff))); wbCheck(hipMemcpy(d_a, h_a, gold_size, hipMemcpyHostToDevice)); // Initialize device values wbCheck(hipMemcpy(d_b, d_a, gold_size, hipMemcpyDeviceToDevice)); // Copy contents from d_a to d_b hipEventRecord(t3); dim3 dimBlock; dim3 dimGrid; switch (opt) { case 0: if (vbs == 0) printf("Optimization level: 0 - Base\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilBase) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilBase) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 1: if (vbs == 0) printf("Optimization level: 1 - Sham\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilSham) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilSham) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } 
wbCheck(hipGetLastError()); } break; case 2: if (vbs == 0) printf("Optimization level: 2 - ZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 3: if (vbs == 0) printf("Optimization level: 3 - Zint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 4: if (vbs == 0) printf("Optimization level: 4 - ShamZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 5: if (vbs == 0) printf("Optimization level: 5 - ShamZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 6: if (vbs == 0) printf("Optimization level: 6 - ShamZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 7: if (vbs == 0) printf("Optimization level: 7 - Roc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 8: if (vbs == 0) printf("Optimization level: 8 - ShamRoc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 
BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy); } else { hipLaunchKernelGGL(( calcStencilShamRoc) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy); } wbCheck(hipGetLastError()); } break; case 9: if (vbs == 0) printf("Optimization level: 9 - RocZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilRocZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilRocZintReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 10: if (vbs == 0) printf("Optimization level: 10 - RocZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilRocZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilRocZint) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; case 11: if (vbs == 0) printf("Optimization level: 11 - ShamRocZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { hipLaunchKernelGGL(( calcStencilShamRocZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_b, d_a, pitchedDimx, dimy, dimz); } else { hipLaunchKernelGGL(( calcStencilShamRocZintTempReg) , dim3(dimGrid),dim3(dimBlock) , 0, 0, d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(hipGetLastError()); } break; default: printf("Invalid optimization selected\n"); break; } hipEventRecord(t4); hipDeviceSynchronize(); if ( (opt == 6) || (opt == 11) ) { if ((t_end/2)%2) { wbCheck(hipMemcpy(h_a, d_b, gold_size, hipMemcpyDeviceToHost)); } else { wbCheck(hipMemcpy(h_a, d_a, gold_size, hipMemcpyDeviceToHost)); } } else { if (t_end%2) { wbCheck(hipMemcpy(h_a, d_b, gold_size, hipMemcpyDeviceToHost)); } else { wbCheck(hipMemcpy(h_a, d_a, gold_size, hipMemcpyDeviceToHost)); } } hipEventRecord(t5); hipFree(d_a); hipFree(d_b); #ifdef PRINT_RESULT if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_a,pitchedDimx,dimy,dimz); } else { printMatrix(h_a,pitchedDimx,dimy,dimz); } #endif if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { if (checkResultTemporal(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } else { if (checkResult(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } } hipEventSynchronize(t5); hipEventElapsedTime(&init, t0, t1); hipEventElapsedTime(&host_comp, t1, t2); hipEventElapsedTime(&host2gpu, t2, t3); hipEventElapsedTime(&gpu_comp, t3, t4); hipEventElapsedTime(&gpu2host, t4, t5); hipEventElapsedTime(&tot, t0, t5); gFlops = (1.0e-6)*flop/gpu_comp; free(h_a); free(h_gold_a); if (vbs == 0) { printf("GPU Clock: %d 
MHz\n",prop.clockRate/1000); printf("DIM = %dx%dx%d; T_END = %d; BLOCK_WIDTH = %dx%dx%d\n", dimx,dimy,dimz,t_end,BLOCK_DIMX,BLOCK_DIMY,BLOCK_DIMZ); printf("init=%f, host_comp=%f, host2gpu=%f, gpu_comp=%f, gpu2host=%f, tot=%f \n", init, host_comp, host2gpu, gpu_comp, gpu2host, tot); printf("Stencil Throughput: %f Gpts/s\n", (1.0e-6*points)/gpu_comp); // gpu_comp is measured in ms printf("gFlops = %f GFLOPs\n", gFlops); printf("\n"); } else { printf("%d,%d,%d,%f,%f\n", dimx,dimy,dimz,gFlops,gpu_comp); } return 0; }
8faef9e4b2f1d1abaf300cf02ffebc8368c484a6.cu
/* Available optimizations (value should be used as the first parameter in the command line): 0 - Base -> no optimization 1 - Sham -> shared memory 2 - ZintReg -> for iteration on Z axis (Paulius) 3 - Zint -> for iteration on Z axis without using registers 4 - ShamZintReg -> shared memory + for iteration on Z axis 5 - ShamZint -> shared memory + for iteration on Z axis without registers 6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking 7 - Roc -> use of read only cache (__restrict__ and const modifiers) 8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers) 9 - RocZintReg -> for iteration on Z axis + read only cache 10 - RocZint -> for iteration on Z axis without registers + read only cache 11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking Known limitations: data grid size must be multiple of BLOCK_SIZE */ #include <stdio.h> //#define PRINT_GOLD //#define PRINT_RESULT #define BLOCK_DIMX 32 #define BLOCK_DIMY 16 #define BLOCK_DIMZ 1 #define RADIUS 6 // Half of the order #define PADDING_SIZE 32 // Error checking function #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ printf("ERROR: Failed to run stmt %s\n", #stmt); \ printf("ERROR: Got CUDA error ... %s\n", cudaGetErrorString(err)); \ return -1; \ } \ } while(0) __constant__ float coeff[RADIUS*6+1]; /* Optimization Base: baseline code (no optimization) */ __global__ void calcStencilBase(float *a, float *b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * a[index] + coeff[1] * a[index - 6] + coeff[2] * a[index - 5] + coeff[3] * a[index - 4] + coeff[4] * a[index - 3] + coeff[5] * a[index - 2] + coeff[6] * a[index - 1] + coeff[7] * a[index + 1] + coeff[8] * a[index + 2] + coeff[9] * a[index + 3] + coeff[10] * a[index + 4] + coeff[11] * a[index + 5] + coeff[12] * a[index + 6] + coeff[13] * a[index - 6*pitchedDimx] + coeff[14] * a[index - 5*pitchedDimx] + coeff[15] * a[index - 4*pitchedDimx] + coeff[16] * a[index - 3*pitchedDimx] + coeff[17] * a[index - 2*pitchedDimx] + coeff[18] * a[index - pitchedDimx] + coeff[19] * a[index + pitchedDimx] + coeff[20] * a[index + 2*pitchedDimx] + coeff[21] * a[index + 3*pitchedDimx] + coeff[22] * a[index + 4*pitchedDimx] + coeff[23] * a[index + 5*pitchedDimx] + coeff[24] * a[index + 6*pitchedDimx] + coeff[25] * a[index - 6*stride] + coeff[26] * a[index - 5*stride] + coeff[27] * a[index - 4*stride] + coeff[28] * a[index - 3*stride] + coeff[29] * a[index - 2*stride] + coeff[30] * a[index - stride] + coeff[31] * a[index + stride] + coeff[32] * a[index + 2*stride] + coeff[33] * a[index + 3*stride] + coeff[34] * a[index + 4*stride] + coeff[35] * a[index + 5*stride] + coeff[36] * a[index + 6*stride]; } /* Optimization Sham: shared memory */ __global__ void calcStencilSham(float *a, float *b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * 
blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[index - (RADIUS*pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[index + (BLOCK_DIMY*pitchedDimx)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[index]; __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * a[index - 6*stride] + coeff[26] * a[index - 5*stride] + coeff[27] * a[index - 4*stride] + coeff[28] * a[index - 3*stride] + coeff[29] * a[index - 2*stride] + coeff[30] * a[index - stride] + coeff[31] * a[index + stride] + coeff[32] * a[index + 2*stride] + coeff[33] * a[index + 3*stride] + coeff[34] * a[index + 4*stride] + coeff[35] * a[index + 5*stride] + coeff[36] * a[index + 6*stride]; } /* Optimization ZintReg: for iteration on Z axis with registers */ __global__ void calcStencilZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5, infront6; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5, behind6; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind6 will be loaded inside the next 'for') behind5 = a[in_index]; in_index += stride; behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; infront6 = a[in_index]; in_index += stride; // Iterate over the Z axis for 
(int i = 0; i < dimz; i++) { // Load the new values in Z axis behind6 = behind5; behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = infront6; infront6 = a[in_index]; in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * a[out_index - 6] + coeff[2] * a[out_index - 5] + coeff[3] * a[out_index - 4] + coeff[4] * a[out_index - 3] + coeff[5] * a[out_index - 2] + coeff[6] * a[out_index - 1] + coeff[7] * a[out_index + 1] + coeff[8] * a[out_index + 2] + coeff[9] * a[out_index + 3] + coeff[10] * a[out_index + 4] + coeff[11] * a[out_index + 5] + coeff[12] * a[out_index + 6] + coeff[13] * a[out_index - 6*pitchedDimx] + coeff[14] * a[out_index - 5*pitchedDimx] + coeff[15] * a[out_index - 4*pitchedDimx] + coeff[16] * a[out_index - 3*pitchedDimx] + coeff[17] * a[out_index - 2*pitchedDimx] + coeff[18] * a[out_index - pitchedDimx] + coeff[19] * a[out_index + pitchedDimx] + coeff[20] * a[out_index + 2*pitchedDimx] + coeff[21] * a[out_index + 3*pitchedDimx] + coeff[22] * a[out_index + 4*pitchedDimx] + coeff[23] * a[out_index + 5*pitchedDimx] + coeff[24] * a[out_index + 6*pitchedDimx] + coeff[25] * behind6 + coeff[26] * behind5 + coeff[27] * behind4 + coeff[28] * behind3 + coeff[29] * behind2 + coeff[30] * behind1 + coeff[31] * infront1 + coeff[32] * infront2 + coeff[33] * infront3 + coeff[34] * infront4 + coeff[35] * infront5 + coeff[36] * infront6; } } /* Optimization Zint: for iteration on Z axis without using registers */ __global__ void calcStencilZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 5*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * a[out_index] + coeff[1] * a[out_index - 6] + coeff[2] * a[out_index - 5] + coeff[3] * a[out_index - 4] + coeff[4] * a[out_index - 3] + coeff[5] * a[out_index - 2] + coeff[6] * a[out_index - 1] + coeff[7] * a[out_index + 1] + coeff[8] * a[out_index + 2] + coeff[9] * a[out_index + 3] + coeff[10] * a[out_index + 4] + coeff[11] * a[out_index + 5] + coeff[12] * a[out_index + 6] + coeff[13] * a[out_index - 6*pitchedDimx] + coeff[14] * a[out_index - 5*pitchedDimx] + coeff[15] * a[out_index - 4*pitchedDimx] + coeff[16] * a[out_index - 3*pitchedDimx] + coeff[17] * a[out_index - 2*pitchedDimx] + coeff[18] * a[out_index - pitchedDimx] + coeff[19] * a[out_index + pitchedDimx] + coeff[20] * a[out_index + 2*pitchedDimx] + coeff[21] * a[out_index + 3*pitchedDimx] + coeff[22] * a[out_index + 4*pitchedDimx] + coeff[23] * a[out_index + 5*pitchedDimx] + coeff[24] * a[out_index + 6*pitchedDimx] + coeff[25] * a[out_index - 6*stride] + coeff[26] * a[out_index - 5*stride] + coeff[27] * a[out_index - 4*stride] + coeff[28] * a[out_index - 3*stride] + coeff[29] * a[out_index - 2*stride] + coeff[30] * a[out_index - stride] + coeff[31] * a[out_index + stride] + coeff[32] * a[out_index + 2*stride] + coeff[33] * a[out_index + 3*stride] + coeff[34] * a[out_index + 4*stride] + coeff[35] * a[out_index + 5*stride] + coeff[36] * a[out_index + 6*stride]; } } /* Optimization ShamZintReg: for iteration on Z 
axis + use of shared memory */ __global__ void calcStencilShamZintReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5, infront6; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5, behind6; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind6 will be loaded inside the next 'for') behind5 = a[in_index]; in_index += stride; behind4 = a[in_index]; in_index += stride; behind3 = a[in_index]; in_index += stride; behind2 = a[in_index]; in_index += stride; behind1 = a[in_index]; in_index += stride; current = a[in_index]; out_index = in_index; in_index += stride; infront1 = a[in_index]; in_index += stride; infront2 = a[in_index]; in_index += stride; infront3 = a[in_index]; in_index += stride; infront4 = a[in_index]; in_index += stride; infront5 = a[in_index]; in_index += stride; infront6 = a[in_index]; in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind6 = behind5; behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = infront6; infront6 = a[in_index]; in_index += stride; out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = current; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * behind6 + coeff[26] * behind5 + coeff[27] * behind4 + coeff[28] * behind3 + coeff[29] * 
behind2 + coeff[30] * behind1 + coeff[31] * infront1 + coeff[32] * infront2 + coeff[33] * infront3 + coeff[34] * infront4 + coeff[35] * infront5 + coeff[36] * infront6; } } /* Optimization ShamZint: for iteration on Z axis without registers + use of shared memory */ __global__ void calcStencilShamZint(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for writing output out_index += 5*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = a[out_index + BLOCK_DIMX]; } // Load current position to shared memory ds_a[ty][sharedTx] = a[out_index]; __syncthreads(); // Compute stencil b[out_index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * a[out_index - 6*stride] + coeff[26] * a[out_index - 5*stride] + coeff[27] * a[out_index - 4*stride] + coeff[28] * a[out_index - 3*stride] + coeff[29] * a[out_index - 2*stride] + coeff[30] * a[out_index - stride] + coeff[31] * a[out_index + stride] + coeff[32] * a[out_index + 2*stride] + coeff[33] * a[out_index + 3*stride] + coeff[34] * a[out_index + 4*stride] + coeff[35] * a[out_index + 5*stride] + coeff[36] * a[out_index + 6*stride]; } } /* Optimization ShamZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamZintTempReg(float *a, float *b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS][2]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z 
values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind6 = a[in_index]; // Z = -R = -6 in_index += stride; t0_behind5 = a[in_index]; // Z = -R+1 = -5 in_index += stride; t0_behind4 = a[in_index]; // Z = -R+2 = -4 in_index += stride; t0_behind3 = a[in_index]; // Z = -R+3 = -3 in_index += stride; t0_behind2 = a[in_index]; // Z = -R+4 = -2 in_index += stride; t0_behind1 = a[in_index]; // Z = -R+5 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = a[in_index]; // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = a[in_index]; // Z = 1 in_index += stride; t0_infront2 = a[in_index]; // Z = 2 in_index += stride; t0_infront3 = a[in_index]; // Z = 3 in_index += stride; t0_infront4 = a[in_index]; // Z = 4 in_index += stride; t0_infront5 = a[in_index]; // Z = 5 in_index += stride; t0_infront6 = a[in_index]; // 
Z = R = 6 in_index += stride; // Load Z = 0 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[out_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[out_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[out_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[out_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind6 = t0_behind6; t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; // Load Z = 1 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + 
coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront1 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 2 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront2 = t0_current; } __syncthreads(); t0_behind6 = 
t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 3 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront3 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 4 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * 
ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront4 = t0_current; } __syncthreads(); t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = 5 to shared memory // Load above/below halo data if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * 
t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront5 = t0_current; } __syncthreads(); for (int i = 0; i < dimz; i++) { // Load Z = (2R+i) to registers t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = a[in_index]; in_index += stride; next_index += stride; // Load Z = R+i to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx][1] = a[next_index - (RADIUS * pitchedDimx)]; ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx][1] = a[next_index + (pitchedDimx * BLOCK_DIMY)]; } // Load left/right halo data if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x][1] = a[next_index - RADIUS]; ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS][1] = a[next_index + BLOCK_DIMX]; } ds_a[ty][sharedTx][1] = t0_current; __syncthreads(); // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront6 = coeff[0] * t0_current + coeff[1] * ds_a[ty][sharedTx - 6][1] + coeff[2] * ds_a[ty][sharedTx - 5][1] + coeff[3] * ds_a[ty][sharedTx - 4][1] + coeff[4] * ds_a[ty][sharedTx - 3][1] + coeff[5] * ds_a[ty][sharedTx - 2][1] + coeff[6] * ds_a[ty][sharedTx - 1][1] + coeff[7] * ds_a[ty][sharedTx + 1][1] + coeff[8] * ds_a[ty][sharedTx + 2][1] + coeff[9] * ds_a[ty][sharedTx + 3][1] + coeff[10] * ds_a[ty][sharedTx + 4][1] + coeff[11] * ds_a[ty][sharedTx + 5][1] + coeff[12] * ds_a[ty][sharedTx + 6][1] + coeff[13] * ds_a[ty - 6][sharedTx][1] + coeff[14] * ds_a[ty - 5][sharedTx][1] + coeff[15] * ds_a[ty - 4][sharedTx][1] + coeff[16] * ds_a[ty - 3][sharedTx][1] + coeff[17] * ds_a[ty - 2][sharedTx][1] + coeff[18] * ds_a[ty - 1][sharedTx][1] + coeff[19] * ds_a[ty + 1][sharedTx][1] + coeff[20] * ds_a[ty + 2][sharedTx][1] + coeff[21] * ds_a[ty + 3][sharedTx][1] + coeff[22] * ds_a[ty + 4][sharedTx][1] + coeff[23] * ds_a[ty + 5][sharedTx][1] + coeff[24] * ds_a[ty + 6][sharedTx][1] + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront6 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[ty][sharedTx][0] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[ty][sharedTx - 6][0] + coeff[2] * ds_a[ty][sharedTx - 5][0] + coeff[3] * ds_a[ty][sharedTx - 4][0] + coeff[4] * ds_a[ty][sharedTx - 3][0] + coeff[5] * ds_a[ty][sharedTx - 2][0] + coeff[6] * ds_a[ty][sharedTx - 1][0] + coeff[7] * ds_a[ty][sharedTx + 1][0] + coeff[8] * ds_a[ty][sharedTx + 2][0] + coeff[9] * ds_a[ty][sharedTx + 3][0] + coeff[10] * ds_a[ty][sharedTx + 4][0] + coeff[11] * ds_a[ty][sharedTx + 5][0] + coeff[12] * ds_a[ty][sharedTx + 6][0] + coeff[13] * ds_a[ty - 6][sharedTx][0] + coeff[14] * ds_a[ty - 5][sharedTx][0] + coeff[15] * ds_a[ty - 
4][sharedTx][0] + coeff[16] * ds_a[ty - 3][sharedTx][0] + coeff[17] * ds_a[ty - 2][sharedTx][0] + coeff[18] * ds_a[ty - 1][sharedTx][0] + coeff[19] * ds_a[ty + 1][sharedTx][0] + coeff[20] * ds_a[ty + 2][sharedTx][0] + coeff[21] * ds_a[ty + 3][sharedTx][0] + coeff[22] * ds_a[ty + 4][sharedTx][0] + coeff[23] * ds_a[ty + 5][sharedTx][0] + coeff[24] * ds_a[ty + 6][sharedTx][0] + coeff[25] * t1_behind6 + coeff[26] * t1_behind5 + coeff[27] * t1_behind4 + coeff[28] * t1_behind3 + coeff[29] * t1_behind2 + coeff[30] * t1_behind1 + coeff[31] * t1_infront1 + coeff[32] * t1_infront2 + coeff[33] * t1_infront3 + coeff[34] * t1_infront4 + coeff[35] * t1_infront5 + coeff[36] * t1_infront6; } out_index += stride; t1_behind6 = t1_behind5; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; t1_infront5 = t1_infront6; } } /* Optimization Roc: use of read only cache (texture memory) */ __global__ void calcStencilRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Compute stencil b[index] = coeff[0] * __ldg(&a[index]) + coeff[1] * __ldg(&a[index - 6]) + coeff[2] * __ldg(&a[index - 5]) + coeff[3] * __ldg(&a[index - 4]) + coeff[4] * __ldg(&a[index - 3]) + coeff[5] * __ldg(&a[index - 2]) + coeff[6] * __ldg(&a[index - 1]) + coeff[7] * __ldg(&a[index + 1]) + coeff[8] * __ldg(&a[index + 2]) + coeff[9] * __ldg(&a[index + 3]) + coeff[10] * __ldg(&a[index + 4]) + coeff[11] * __ldg(&a[index + 5]) + coeff[12] * __ldg(&a[index + 6]) + coeff[13] * __ldg(&a[index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[index - pitchedDimx]) + coeff[19] * __ldg(&a[index + pitchedDimx]) + coeff[20] * __ldg(&a[index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[index + 6*pitchedDimx]) + coeff[25] * __ldg(&a[index - 6*stride]) + coeff[26] * __ldg(&a[index - 5*stride]) + coeff[27] * __ldg(&a[index - 4*stride]) + coeff[28] * __ldg(&a[index - 3*stride]) + coeff[29] * __ldg(&a[index - 2*stride]) + coeff[30] * __ldg(&a[index - stride]) + coeff[31] * __ldg(&a[index + stride]) + coeff[32] * __ldg(&a[index + 2*stride]) + coeff[33] * __ldg(&a[index + 3*stride]) + coeff[34] * __ldg(&a[index + 4*stride]) + coeff[35] * __ldg(&a[index + 5*stride]) + coeff[36] * __ldg(&a[index + 6*stride]); } /* Optimization ShamRoc: use of shared memory + read only cache (texture memory) */ __global__ void calcStencilShamRoc(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy) { // Shared Memory Declaration __shared__ float ds_a[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int tx = threadIdx.x + PADDING_SIZE; int sharedTx = threadIdx.x + RADIUS; // Index for shared memory (no padding) int ty = threadIdx.y + RADIUS; int tz = threadIdx.z + RADIUS; int row = 
blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int depth = blockIdx.z * blockDim.z + tz; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int index = (depth * stride) + (row * pitchedDimx) + col; // Load above/below halo data to shared memory if (threadIdx.y < RADIUS) { ds_a[threadIdx.y][sharedTx] = __ldg(&a[index - (RADIUS*pitchedDimx)]); ds_a[threadIdx.y + BLOCK_DIMY + RADIUS][sharedTx] = __ldg(&a[index + (BLOCK_DIMY*pitchedDimx)]); } // Load left/right halo data to shared memory if (threadIdx.x < RADIUS) { ds_a[ty][threadIdx.x] = __ldg(&a[index - RADIUS]); ds_a[ty][threadIdx.x + BLOCK_DIMX + RADIUS] = __ldg(&a[index + BLOCK_DIMX]); } // Load current position to shared memory ds_a[ty][sharedTx] = __ldg(&a[index]); __syncthreads(); // Compute stencil b[index] = coeff[0] * ds_a[ty][sharedTx] + coeff[1] * ds_a[ty][sharedTx - 6] + coeff[2] * ds_a[ty][sharedTx - 5] + coeff[3] * ds_a[ty][sharedTx - 4] + coeff[4] * ds_a[ty][sharedTx - 3] + coeff[5] * ds_a[ty][sharedTx - 2] + coeff[6] * ds_a[ty][sharedTx - 1] + coeff[7] * ds_a[ty][sharedTx + 1] + coeff[8] * ds_a[ty][sharedTx + 2] + coeff[9] * ds_a[ty][sharedTx + 3] + coeff[10] * ds_a[ty][sharedTx + 4] + coeff[11] * ds_a[ty][sharedTx + 5] + coeff[12] * ds_a[ty][sharedTx + 6] + coeff[13] * ds_a[ty - 6][sharedTx] + coeff[14] * ds_a[ty - 5][sharedTx] + coeff[15] * ds_a[ty - 4][sharedTx] + coeff[16] * ds_a[ty - 3][sharedTx] + coeff[17] * ds_a[ty - 2][sharedTx] + coeff[18] * ds_a[ty - 1][sharedTx] + coeff[19] * ds_a[ty + 1][sharedTx] + coeff[20] * ds_a[ty + 2][sharedTx] + coeff[21] * ds_a[ty + 3][sharedTx] + coeff[22] * ds_a[ty + 4][sharedTx] + coeff[23] * ds_a[ty + 5][sharedTx] + coeff[24] * ds_a[ty + 6][sharedTx] + coeff[25] * __ldg(&a[index - 6*stride]) + coeff[26] * __ldg(&a[index - 5*stride]) + coeff[27] * __ldg(&a[index - 4*stride]) + coeff[28] * __ldg(&a[index - 3*stride]) + coeff[29] * __ldg(&a[index - 2*stride]) + coeff[30] * __ldg(&a[index - stride]) + coeff[31] * __ldg(&a[index + stride]) + coeff[32] * __ldg(&a[index + 2*stride]) + coeff[33] * __ldg(&a[index + 3*stride]) + coeff[34] * __ldg(&a[index + 4*stride]) + coeff[35] * __ldg(&a[index + 5*stride]) + coeff[36] * __ldg(&a[index + 6*stride]); } /* Optimization RocZintReg: use of iteration on Z axis + read only cache (texture memory) */ __global__ void calcStencilRocZintReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output register float infront1, infront2, infront3, infront4, infront5, infront6; // Variable to store the value in front (in the Z axis) of the current slice register float behind1, behind2, behind3, behind4, behind5, behind6; // Variable to store the value behind (in the Z axis) the current slice register float current; // Input value in the current slice // Load initial values (behind6 will be loaded inside the next 'for') behind5 = __ldg(&a[in_index]); in_index += stride; behind4 = __ldg(&a[in_index]); in_index += stride; behind3 = __ldg(&a[in_index]); in_index += stride; behind2 = __ldg(&a[in_index]); in_index += stride; behind1 = __ldg(&a[in_index]); in_index += stride; current = __ldg(&a[in_index]); out_index = in_index; in_index += stride; infront1 = __ldg(&a[in_index]); 
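// The thirteen registers behind1..behind6, current, and infront1..infront6 hold a sliding
// window of Z slices (RADIUS = 6 on each side of the current plane); the remaining loads
// below finish priming that window before the Z loop starts, after which each slice is
// fetched from global memory once and reused purely by shifting registers.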
in_index += stride; infront2 = __ldg(&a[in_index]); in_index += stride; infront3 = __ldg(&a[in_index]); in_index += stride; infront4 = __ldg(&a[in_index]); in_index += stride; infront5 = __ldg(&a[in_index]); in_index += stride; infront6 = __ldg(&a[in_index]); in_index += stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { // Load the new values in Z axis behind6 = behind5; behind5 = behind4; behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = infront5; infront5 = infront6; infront6 = __ldg(&a[in_index]); in_index += stride; out_index += stride; // Compute stencil b[out_index] = coeff[0] * current + coeff[1] * __ldg(&a[out_index - 6]) + coeff[2] * __ldg(&a[out_index - 5]) + coeff[3] * __ldg(&a[out_index - 4]) + coeff[4] * __ldg(&a[out_index - 3]) + coeff[5] * __ldg(&a[out_index - 2]) + coeff[6] * __ldg(&a[out_index - 1]) + coeff[7] * __ldg(&a[out_index + 1]) + coeff[8] * __ldg(&a[out_index + 2]) + coeff[9] * __ldg(&a[out_index + 3]) + coeff[10] * __ldg(&a[out_index + 4]) + coeff[11] * __ldg(&a[out_index + 5]) + coeff[12] * __ldg(&a[out_index + 6]) + coeff[13] * __ldg(&a[out_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index - pitchedDimx]) + coeff[19] * __ldg(&a[out_index + pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[out_index + 6*pitchedDimx]) + coeff[25] * behind6 + coeff[26] * behind5 + coeff[27] * behind4 + coeff[28] * behind3 + coeff[29] * behind2 + coeff[30] * behind1 + coeff[31] * infront1 + coeff[32] * infront2 + coeff[33] * infront3 + coeff[34] * infront4 + coeff[35] * infront5 + coeff[36] * infront6; } } /* Optimization RocZint: use of iteration on Z axis without registers + read only cache (texture memory) */ __global__ void calcStencilRocZint(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * blockDim.y + ty; int col = blockIdx.x * blockDim.x + tx; int stride = pitchedDimx * (dimy + 2*RADIUS); // 2D slice int out_index = (row * pitchedDimx) + col; // Index for reading Z values out_index += 5*stride; // Iterate over the Z axis for (int i = 0; i < dimz; i++) { out_index += stride; // Compute stencil b[out_index] = coeff[0] * __ldg(&a[out_index]) + coeff[1] * __ldg(&a[out_index - 6]) + coeff[2] * __ldg(&a[out_index - 5]) + coeff[3] * __ldg(&a[out_index - 4]) + coeff[4] * __ldg(&a[out_index - 3]) + coeff[5] * __ldg(&a[out_index - 2]) + coeff[6] * __ldg(&a[out_index - 1]) + coeff[7] * __ldg(&a[out_index + 1]) + coeff[8] * __ldg(&a[out_index + 2]) + coeff[9] * __ldg(&a[out_index + 3]) + coeff[10] * __ldg(&a[out_index + 4]) + coeff[11] * __ldg(&a[out_index + 5]) + coeff[12] * __ldg(&a[out_index + 6]) + coeff[13] * __ldg(&a[out_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index - pitchedDimx]) + coeff[19] * 
__ldg(&a[out_index + pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[out_index + 6*pitchedDimx]) + coeff[25] * __ldg(&a[out_index - 6*stride]) + coeff[26] * __ldg(&a[out_index - 5*stride]) + coeff[27] * __ldg(&a[out_index - 4*stride]) + coeff[28] * __ldg(&a[out_index - 3*stride]) + coeff[29] * __ldg(&a[out_index - 2*stride]) + coeff[30] * __ldg(&a[out_index - stride]) + coeff[31] * __ldg(&a[out_index + stride]) + coeff[32] * __ldg(&a[out_index + 2*stride]) + coeff[33] * __ldg(&a[out_index + 3*stride]) + coeff[34] * __ldg(&a[out_index + 4*stride]) + coeff[35] * __ldg(&a[out_index + 5*stride]) + coeff[36] * __ldg(&a[out_index + 6*stride]); } } /* Optimization ShamRocZintTempReg: shared memory + for iteration on Z axis + temporal blocking (will always compute 2 time iterations) */ __global__ void calcStencilShamRocZintTempReg(const float* __restrict__ a, float* __restrict__ b, int pitchedDimx, int dimy, int dimz) { // Shared memory declaration __shared__ float ds_a[BLOCK_DIMY][BLOCK_DIMX]; int tx = threadIdx.x + PADDING_SIZE; int ty = threadIdx.y + RADIUS; int row = blockIdx.y * (BLOCK_DIMY-2*RADIUS) + ty; int col = blockIdx.x * (BLOCK_DIMX-2*RADIUS) + tx; int stride = pitchedDimx * (dimy + 4*RADIUS); // 2D slice int in_index = (row * pitchedDimx) + col; // Index for reading Z values int out_index = 0; // Index for writing output int next_index = 0; // Index for plane Z = output + RADIUS // t0 = t + 0 register float t0_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t0_behind1; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t0_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t0_current; // Input value in the current slice // t1 = t + 1 register float t1_infront6; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront5; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront4; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront3; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront2; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_infront1; // Variable to store the value ahead (in the Z axis) of the current slice register float t1_behind1; // Variable to 
store the value behind (in the Z axis) the current slice register float t1_behind2; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind3; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind4; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind5; // Variable to store the value behind (in the Z axis) the current slice register float t1_behind6; // Variable to store the value behind (in the Z axis) the current slice register float t1_current; // Value in current slice for t+1 // Load ghost zones in_index += RADIUS*stride; t0_behind6 = __ldg(&a[in_index]); // Z = -R = -6 in_index += stride; t0_behind5 = __ldg(&a[in_index]); // Z = -R+1 = -5 in_index += stride; t0_behind4 = __ldg(&a[in_index]); // Z = -R+2 = -4 in_index += stride; t0_behind3 = __ldg(&a[in_index]); // Z = -R+3 = -3 in_index += stride; t0_behind2 = __ldg(&a[in_index]); // Z = -R+4 = -2 in_index += stride; t0_behind1 = __ldg(&a[in_index]); // Z = -R+5 = -1 in_index += stride; out_index = in_index; // Index for writing output, Z = 0 t0_current = __ldg(&a[in_index]); // Z = 0 in_index += stride; next_index = in_index; // Z = 1 t0_infront1 = __ldg(&a[in_index]); // Z = 1 in_index += stride; t0_infront2 = __ldg(&a[in_index]); // Z = 2 in_index += stride; t0_infront3 = __ldg(&a[in_index]); // Z = 3 in_index += stride; t0_infront4 = __ldg(&a[in_index]); // Z = 4 in_index += stride; t0_infront5 = __ldg(&a[in_index]); // Z = 5 in_index += stride; t0_infront6 = __ldg(&a[in_index]); // Z = R = 6 in_index += stride; // Compute stencil for Z = 0 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) ) { t1_current = coeff[0] * t0_current + coeff[1] * __ldg(&a[out_index - 6]) + coeff[2] * __ldg(&a[out_index - 5]) + coeff[3] * __ldg(&a[out_index - 4]) + coeff[4] * __ldg(&a[out_index - 3]) + coeff[5] * __ldg(&a[out_index - 2]) + coeff[6] * __ldg(&a[out_index - 1]) + coeff[7] * __ldg(&a[out_index + 1]) + coeff[8] * __ldg(&a[out_index + 2]) + coeff[9] * __ldg(&a[out_index + 3]) + coeff[10] * __ldg(&a[out_index + 4]) + coeff[11] * __ldg(&a[out_index + 5]) + coeff[12] * __ldg(&a[out_index + 6]) + coeff[13] * __ldg(&a[out_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[out_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[out_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[out_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[out_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[out_index - pitchedDimx]) + coeff[19] * __ldg(&a[out_index + pitchedDimx]) + coeff[20] * __ldg(&a[out_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[out_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[out_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[out_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[out_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_current = t0_current; } // Copy planes Z = -1 to -R to registers in t+1 (ghost zones, keep values in 0.0) t1_behind6 = t0_behind6; t1_behind5 = t0_behind5; t1_behind4 = t0_behind4; t1_behind3 = t0_behind3; t1_behind2 = t0_behind2; t1_behind1 = t0_behind1; t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = 
t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; // Compute stencil for Z = 1 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront1 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront1 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 2 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront2 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * 
__ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront2 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 3 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront3 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront3 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 4 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront4 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + 
coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront4 = t0_current; } t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += stride; // Compute stencil for Z = 5 (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (dimz > 1) ) { t1_infront5 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront5 = t0_current; } for (int i = 0; i < dimz-(4*RADIUS); i++) { // Load Z = (2R+i) to registers t0_behind6 = t0_behind5; t0_behind5 = t0_behind4; t0_behind4 = t0_behind3; t0_behind3 = t0_behind2; t0_behind2 = t0_behind1; t0_behind1 = t0_current; t0_current = t0_infront1; t0_infront1 = t0_infront2; t0_infront2 = t0_infront3; t0_infront3 = t0_infront4; t0_infront4 = t0_infront5; t0_infront5 = t0_infront6; t0_infront6 = __ldg(&a[in_index]); in_index += stride; next_index += 
stride; // Compute stencil for Z = R+i (t + 1) but exclude ghost zones if ( (row >= 2*RADIUS) && (row < (dimy + 2*RADIUS)) && (col >= PADDING_SIZE) && (col < (pitchedDimx - PADDING_SIZE)) && (i < dimz-RADIUS) ) { t1_infront6 = coeff[0] * t0_current + coeff[1] * __ldg(&a[next_index - 6]) + coeff[2] * __ldg(&a[next_index - 5]) + coeff[3] * __ldg(&a[next_index - 4]) + coeff[4] * __ldg(&a[next_index - 3]) + coeff[5] * __ldg(&a[next_index - 2]) + coeff[6] * __ldg(&a[next_index - 1]) + coeff[7] * __ldg(&a[next_index + 1]) + coeff[8] * __ldg(&a[next_index + 2]) + coeff[9] * __ldg(&a[next_index + 3]) + coeff[10] * __ldg(&a[next_index + 4]) + coeff[11] * __ldg(&a[next_index + 5]) + coeff[12] * __ldg(&a[next_index + 6]) + coeff[13] * __ldg(&a[next_index - 6*pitchedDimx]) + coeff[14] * __ldg(&a[next_index - 5*pitchedDimx]) + coeff[15] * __ldg(&a[next_index - 4*pitchedDimx]) + coeff[16] * __ldg(&a[next_index - 3*pitchedDimx]) + coeff[17] * __ldg(&a[next_index - 2*pitchedDimx]) + coeff[18] * __ldg(&a[next_index - pitchedDimx]) + coeff[19] * __ldg(&a[next_index + pitchedDimx]) + coeff[20] * __ldg(&a[next_index + 2*pitchedDimx]) + coeff[21] * __ldg(&a[next_index + 3*pitchedDimx]) + coeff[22] * __ldg(&a[next_index + 4*pitchedDimx]) + coeff[23] * __ldg(&a[next_index + 5*pitchedDimx]) + coeff[24] * __ldg(&a[next_index + 6*pitchedDimx]) + coeff[25] * t0_behind6 + coeff[26] * t0_behind5 + coeff[27] * t0_behind4 + coeff[28] * t0_behind3 + coeff[29] * t0_behind2 + coeff[30] * t0_behind1 + coeff[31] * t0_infront1 + coeff[32] * t0_infront2 + coeff[33] * t0_infront3 + coeff[34] * t0_infront4 + coeff[35] * t0_infront5 + coeff[36] * t0_infront6; } else { t1_infront6 = t0_current; } __syncthreads(); // Load Z = k (t + 1) to shared memory ds_a[threadIdx.y][threadIdx.x] = t1_current; __syncthreads(); // Compute stencil for Z = k (t + 2) but exclude halo zones if ( (threadIdx.y >= RADIUS) && (threadIdx.y < (BLOCK_DIMY - RADIUS)) && (threadIdx.x >= RADIUS) && (threadIdx.x < (BLOCK_DIMX - RADIUS)) ) { b[out_index] = coeff[0] * t1_current + coeff[1] * ds_a[threadIdx.y][threadIdx.x - 6] + coeff[2] * ds_a[threadIdx.y][threadIdx.x - 5] + coeff[3] * ds_a[threadIdx.y][threadIdx.x - 4] + coeff[4] * ds_a[threadIdx.y][threadIdx.x - 3] + coeff[5] * ds_a[threadIdx.y][threadIdx.x - 2] + coeff[6] * ds_a[threadIdx.y][threadIdx.x - 1] + coeff[7] * ds_a[threadIdx.y][threadIdx.x + 1] + coeff[8] * ds_a[threadIdx.y][threadIdx.x + 2] + coeff[9] * ds_a[threadIdx.y][threadIdx.x + 3] + coeff[10] * ds_a[threadIdx.y][threadIdx.x + 4] + coeff[11] * ds_a[threadIdx.y][threadIdx.x + 5] + coeff[12] * ds_a[threadIdx.y][threadIdx.x + 6] + coeff[13] * ds_a[threadIdx.y - 6][threadIdx.x] + coeff[14] * ds_a[threadIdx.y - 5][threadIdx.x] + coeff[15] * ds_a[threadIdx.y - 4][threadIdx.x] + coeff[16] * ds_a[threadIdx.y - 3][threadIdx.x] + coeff[17] * ds_a[threadIdx.y - 2][threadIdx.x] + coeff[18] * ds_a[threadIdx.y - 1][threadIdx.x] + coeff[19] * ds_a[threadIdx.y + 1][threadIdx.x] + coeff[20] * ds_a[threadIdx.y + 2][threadIdx.x] + coeff[21] * ds_a[threadIdx.y + 3][threadIdx.x] + coeff[22] * ds_a[threadIdx.y + 4][threadIdx.x] + coeff[23] * ds_a[threadIdx.y + 5][threadIdx.x] + coeff[24] * ds_a[threadIdx.y + 6][threadIdx.x] + coeff[25] * t1_behind6 + coeff[26] * t1_behind5 + coeff[27] * t1_behind4 + coeff[28] * t1_behind3 + coeff[29] * t1_behind2 + coeff[30] * t1_behind1 + coeff[31] * t1_infront1 + coeff[32] * t1_infront2 + coeff[33] * t1_infront3 + coeff[34] * t1_infront4 + coeff[35] * t1_infront5 + coeff[36] * t1_infront6; } out_index += stride; t1_behind6 = 
t1_behind5; t1_behind5 = t1_behind4; t1_behind4 = t1_behind3; t1_behind3 = t1_behind2; t1_behind2 = t1_behind1; t1_behind1 = t1_current; t1_current = t1_infront1; t1_infront1 = t1_infront2; t1_infront2 = t1_infront3; t1_infront3 = t1_infront4; t1_infront4 = t1_infront5; t1_infront5 = t1_infront6; } } void initGold(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+2*RADIUS); int index = 0; for (int i = 0; i < (dimz+2*RADIUS); i++) { for (int j = 0; j < (dimy+2*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (i<RADIUS || j<RADIUS || i>=dimz+RADIUS || j>=dimy+RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void initGoldTemporal(float *a, int dimx, int dimy, int dimz, int pitchedDimx) { int stride = pitchedDimx * (dimy+4*RADIUS); int index = 0; for (int i = 0; i < (dimz+4*RADIUS); i++) { for (int j = 0; j < (dimy+4*RADIUS); j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if ( i<2*RADIUS || j<2*RADIUS || i>=dimz+2*RADIUS || j>=dimy+2*RADIUS || k<PADDING_SIZE || k>=dimx+PADDING_SIZE ) { a[index] = 0.0; } else { a[index] = 1.0; } } } } } void hostStencil(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * (dimy+2*RADIUS); b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float)); initGold(b, dimx, dimy, dimz, pitchedDimx); int index = 0; for (int t = 0; t < t_end; t++) { for (int i = RADIUS; i < dimz+RADIUS; i++) { for (int j = RADIUS; j < dimy+RADIUS; j++) { for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; if (t%2) { a[index] = hcoeff[0] * b[index] + hcoeff[1] * b[index - 6] + hcoeff[2] * b[index - 5] + hcoeff[3] * b[index - 4] + hcoeff[4] * b[index - 3] + hcoeff[5] * b[index - 2] + hcoeff[6] * b[index - 1] + hcoeff[7] * b[index + 1] + hcoeff[8] * b[index + 2] + hcoeff[9] * b[index + 3] + hcoeff[10] * b[index + 4] + hcoeff[11] * b[index + 5] + hcoeff[12] * b[index + 6] + hcoeff[13] * b[index - 6*pitchedDimx] + hcoeff[14] * b[index - 5*pitchedDimx] + hcoeff[15] * b[index - 4*pitchedDimx] + hcoeff[16] * b[index - 3*pitchedDimx] + hcoeff[17] * b[index - 2*pitchedDimx] + hcoeff[18] * b[index - pitchedDimx] + hcoeff[19] * b[index + pitchedDimx] + hcoeff[20] * b[index + 2*pitchedDimx] + hcoeff[21] * b[index + 3*pitchedDimx] + hcoeff[22] * b[index + 4*pitchedDimx] + hcoeff[23] * b[index + 5*pitchedDimx] + hcoeff[24] * b[index + 6*pitchedDimx] + hcoeff[25] * b[index - 6*stride] + hcoeff[26] * b[index - 5*stride] + hcoeff[27] * b[index - 4*stride] + hcoeff[28] * b[index - 3*stride] + hcoeff[29] * b[index - 2*stride] + hcoeff[30] * b[index - stride] + hcoeff[31] * b[index + stride] + hcoeff[32] * b[index + 2*stride] + hcoeff[33] * b[index + 3*stride] + hcoeff[34] * b[index + 4*stride] + hcoeff[35] * b[index + 5*stride] + hcoeff[36] * b[index + 6*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 6] + hcoeff[2] * a[index - 5] + hcoeff[3] * a[index - 4] + hcoeff[4] * a[index - 3] + hcoeff[5] * a[index - 2] + hcoeff[6] * a[index - 1] + hcoeff[7] * a[index + 1] + hcoeff[8] * a[index + 2] + hcoeff[9] * a[index + 3] + hcoeff[10] * a[index + 4] + hcoeff[11] * a[index + 5] + hcoeff[12] * a[index + 6] + hcoeff[13] * a[index - 6*pitchedDimx] + hcoeff[14] * a[index - 5*pitchedDimx] + hcoeff[15] * a[index - 4*pitchedDimx] + hcoeff[16] * a[index - 3*pitchedDimx] + hcoeff[17] * a[index - 
2*pitchedDimx] + hcoeff[18] * a[index - pitchedDimx] + hcoeff[19] * a[index + pitchedDimx] + hcoeff[20] * a[index + 2*pitchedDimx] + hcoeff[21] * a[index + 3*pitchedDimx] + hcoeff[22] * a[index + 4*pitchedDimx] + hcoeff[23] * a[index + 5*pitchedDimx] + hcoeff[24] * a[index + 6*pitchedDimx] + hcoeff[25] * a[index - 6*stride] + hcoeff[26] * a[index - 5*stride] + hcoeff[27] * a[index - 4*stride] + hcoeff[28] * a[index - 3*stride] + hcoeff[29] * a[index - 2*stride] + hcoeff[30] * a[index - stride] + hcoeff[31] * a[index + stride] + hcoeff[32] * a[index + 2*stride] + hcoeff[33] * a[index + 3*stride] + hcoeff[34] * a[index + 4*stride] + hcoeff[35] * a[index + 5*stride] + hcoeff[36] * a[index + 6*stride]; } } } } } if (t_end%2) { for (int i = RADIUS; i < dimz+RADIUS; i++) { for (int j = RADIUS; j < dimy+RADIUS; j++) { for (int k = PADDING_SIZE; k < dimx+PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void hostStencilTemporal(float *a, int t_end, int dimx, int dimy, int dimz, float *hcoeff, int pitchedDimx) { float *b; int stride = pitchedDimx * (dimy+4*RADIUS); b = (float *)malloc((dimz+2*RADIUS) * stride * sizeof(float)); initGoldTemporal(b, dimx, dimy, dimz, pitchedDimx); int index = 0; for (int t = 0; t < t_end; t++) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; if (t%2) { a[index] = hcoeff[0] * b[index] + hcoeff[1] * b[index - 6] + hcoeff[2] * b[index - 5] + hcoeff[3] * b[index - 4] + hcoeff[4] * b[index - 3] + hcoeff[5] * b[index - 2] + hcoeff[6] * b[index - 1] + hcoeff[7] * b[index + 1] + hcoeff[8] * b[index + 2] + hcoeff[9] * b[index + 3] + hcoeff[10] * b[index + 4] + hcoeff[11] * b[index + 5] + hcoeff[12] * b[index + 6] + hcoeff[13] * b[index - 6*pitchedDimx] + hcoeff[14] * b[index - 5*pitchedDimx] + hcoeff[15] * b[index - 4*pitchedDimx] + hcoeff[16] * b[index - 3*pitchedDimx] + hcoeff[17] * b[index - 2*pitchedDimx] + hcoeff[18] * b[index - pitchedDimx] + hcoeff[19] * b[index + pitchedDimx] + hcoeff[20] * b[index + 2*pitchedDimx] + hcoeff[21] * b[index + 3*pitchedDimx] + hcoeff[22] * b[index + 4*pitchedDimx] + hcoeff[23] * b[index + 5*pitchedDimx] + hcoeff[24] * b[index + 6*pitchedDimx] + hcoeff[25] * b[index - 6*stride] + hcoeff[26] * b[index - 5*stride] + hcoeff[27] * b[index - 4*stride] + hcoeff[28] * b[index - 3*stride] + hcoeff[29] * b[index - 2*stride] + hcoeff[30] * b[index - stride] + hcoeff[31] * b[index + stride] + hcoeff[32] * b[index + 2*stride] + hcoeff[33] * b[index + 3*stride] + hcoeff[34] * b[index + 4*stride] + hcoeff[35] * b[index + 5*stride] + hcoeff[36] * b[index + 6*stride]; } else { b[index] = hcoeff[0] * a[index] + hcoeff[1] * a[index - 6] + hcoeff[2] * a[index - 5] + hcoeff[3] * a[index - 4] + hcoeff[4] * a[index - 3] + hcoeff[5] * a[index - 2] + hcoeff[6] * a[index - 1] + hcoeff[7] * a[index + 1] + hcoeff[8] * a[index + 2] + hcoeff[9] * a[index + 3] + hcoeff[10] * a[index + 4] + hcoeff[11] * a[index + 5] + hcoeff[12] * a[index + 6] + hcoeff[13] * a[index - 6*pitchedDimx] + hcoeff[14] * a[index - 5*pitchedDimx] + hcoeff[15] * a[index - 4*pitchedDimx] + hcoeff[16] * a[index - 3*pitchedDimx] + hcoeff[17] * a[index - 2*pitchedDimx] + hcoeff[18] * a[index - pitchedDimx] + hcoeff[19] * a[index + pitchedDimx] + hcoeff[20] * a[index + 2*pitchedDimx] + hcoeff[21] * a[index + 3*pitchedDimx] + hcoeff[22] * a[index + 4*pitchedDimx] + hcoeff[23] * a[index + 5*pitchedDimx] + 
hcoeff[24] * a[index + 6*pitchedDimx] + hcoeff[25] * a[index - 6*stride] + hcoeff[26] * a[index - 5*stride] + hcoeff[27] * a[index - 4*stride] + hcoeff[28] * a[index - 3*stride] + hcoeff[29] * a[index - 2*stride] + hcoeff[30] * a[index - stride] + hcoeff[31] * a[index + stride] + hcoeff[32] * a[index + 2*stride] + hcoeff[33] * a[index + 3*stride] + hcoeff[34] * a[index + 4*stride] + hcoeff[35] * a[index + 5*stride] + hcoeff[36] * a[index + 6*stride]; } } } } } if (t_end%2) { for (int i = 2*RADIUS; i < dimz+2*RADIUS; i++) { for (int j = 2*RADIUS; j < dimy+2*RADIUS; j++) { for (int k = PADDING_SIZE; k < pitchedDimx-PADDING_SIZE; k++) { index = i*stride + j*pitchedDimx + k; a[index] = b[index]; } } } } free(b); } void printMatrix(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i=0; i < dimz+2*RADIUS; i++) { for (int j=0; j < dimy+2*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } void printMatrixTemporal(float *a, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i=0; i < dimz+4*RADIUS; i++) { for (int j=0; j < dimy+4*RADIUS; j++) { for (int k=0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; printf("%f, ",a[index]); } printf("\n"); } printf("\n"); } } bool checkResult(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+2*RADIUS); for (int i = 0; i < dimz+2*RADIUS; i++) { for (int j = 0; j < dimy+2*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } bool checkResultTemporal(float *a, float *ref, int pitchedDimx, int dimy, int dimz) { int index; int stride = pitchedDimx * (dimy+4*RADIUS); for (int i = 0; i < dimz+4*RADIUS; i++) { for (int j = 0; j < dimy+4*RADIUS; j++) { for (int k = 0; k < pitchedDimx; k++) { index = i*stride + j*pitchedDimx + k; if (a[index] != ref[index]) { printf("Expected: %f, received: %f at position [z=%d,y=%d,x=%d]\n",ref[index],a[index],i,j,k); return 0; } } } } return 1; } int main(int argc, char* argv[]) { float *h_a, *h_gold_a; float *d_a, *d_b; float hcoeff[RADIUS*6+1] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; cudaEvent_t t0, t1, t2, t3, t4, t5; float init, host_comp, host2gpu, gpu2host, gpu_comp, tot; int dimx, dimy, dimz, t_end; long points, flop; float gFlops; int opt; // Variable to select the optimization char vbs = 0; if (argc == 7) { vbs = 1; } else { if (argc != 6) { printf("use: <exec> <OPT> <DIMX> <DIMY> <DIMZ> <T_END> <VBS(1)>\n" "Available optimizations (value should be used as the first parameter in the command line):\n" "0 - Base -> no optimization\n" "1 - Sham -> shared memory\n" "2 - ZintReg -> for iteration on Z axis (Paulius)\n" "3 - Zint -> for iteration on Z axis without using registers\n" "4 - ShamZintReg -> shared memory + for iteration on Z axis\n" "5 - ShamZint -> shared memory + for iteration on Z axis without registers\n" "6 - ShamZintTempReg -> shared memory + for iteration on Z axis + temporal blocking\n" "7 - Roc -> use of read only cache (__restrict__ and const modifiers)\n" "8 - ShamRoc -> use of shared memory + read only cache (__restrict__ and const modifiers)\n" "9 - RocZintReg -> for iteration on Z axis + read only 
cache\n" "10 - RocZint -> for iteration on Z axis without registers + read only cache\n" "11 - ShamRocZintTempReg -> shared memory + read only cache + for iteration on Z axis + temporal blocking\n" ); exit(-1); } } opt = atoi(argv[1]); dimx = atoi(argv[2]); dimy = atoi(argv[3]); dimz = atoi(argv[4]); t_end = atoi(argv[5]); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); cudaEventCreate(&t0); cudaEventCreate(&t1); cudaEventCreate(&t2); cudaEventCreate(&t3); cudaEventCreate(&t4); cudaEventCreate(&t5); int pitchedDimx = dimx + 2*PADDING_SIZE; int gold_size; // If temporal blocking is requested, allocate more device memory if ( (opt == 6) || (opt == 11) ) { gold_size = pitchedDimx * (dimy+4*RADIUS) * (dimz+4*RADIUS) * sizeof(float); // Check if the number of iterations is even if ( (t_end%2) != 0) { if (vbs == 0) printf("Number of time iterations is odd, adding one iteration!\n"); t_end++; } } else { gold_size = pitchedDimx * (dimy+2*RADIUS) * (dimz+2*RADIUS) * sizeof(float); } points = (long)dimx * (long)dimy * (long)dimz * (long)t_end; flop = (long)((6*RADIUS) + ((6*RADIUS)+1)) * points; // 36 adds, 37 multiplies cudaEventRecord(t0); /* allocate device variables */ wbCheck(cudaMalloc((void**) &d_a, gold_size)); wbCheck(cudaMalloc((void**) &d_b, gold_size)); /* allocate host variables */ h_a = (float *)malloc(gold_size); h_gold_a = (float *)malloc(gold_size); if ( (opt == 6) || (opt == 11) ) { initGoldTemporal(h_a, dimx, dimy, dimz, pitchedDimx); initGoldTemporal(h_gold_a, dimx, dimy, dimz, pitchedDimx); } else { initGold(h_a, dimx, dimy, dimz, pitchedDimx); initGold(h_gold_a, dimx, dimy, dimz, pitchedDimx); } cudaEventRecord(t1); if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { hostStencilTemporal(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } else { hostStencil(h_gold_a, t_end, dimx, dimy, dimz, hcoeff, pitchedDimx); } } #ifdef PRINT_GOLD if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_gold_a, pitchedDimx, dimy, dimz); } else { printMatrix(h_gold_a, pitchedDimx, dimy, dimz); } #endif cudaEventRecord(t2); wbCheck(cudaMemcpyToSymbol(coeff, hcoeff, sizeof(hcoeff))); wbCheck(cudaMemcpy(d_a, h_a, gold_size, cudaMemcpyHostToDevice)); // Initialize device values wbCheck(cudaMemcpy(d_b, d_a, gold_size, cudaMemcpyDeviceToDevice)); // Copy contents from d_a to d_b cudaEventRecord(t3); dim3 dimBlock; dim3 dimGrid; switch (opt) { case 0: if (vbs == 0) printf("Optimization level: 0 - Base\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilBase <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilBase <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 1: if (vbs == 0) printf("Optimization level: 1 - Sham\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilSham <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilSham <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 2: if (vbs == 0) printf("Optimization level: 2 - ZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = 
(int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilZintReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilZintReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 3: if (vbs == 0) printf("Optimization level: 3 - Zint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilZint <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilZint <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 4: if (vbs == 0) printf("Optimization level: 4 - ShamZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilShamZintReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamZintReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 5: if (vbs == 0) printf("Optimization level: 5 - ShamZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilShamZint <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamZint <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 6: if (vbs == 0) printf("Optimization level: 6 - ShamZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { calcStencilShamZintTempReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamZintTempReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 7: if (vbs == 0) printf("Optimization level: 7 - Roc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilRoc <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilRoc <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 8: if (vbs == 0) printf("Optimization level: 8 - ShamRoc\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = BLOCK_DIMZ; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = (int)ceil(dimz/BLOCK_DIMZ); for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilShamRoc <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy); } else { calcStencilShamRoc <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy); } wbCheck(cudaGetLastError()); } break; case 9: if (vbs == 0) printf("Optimization level: 9 - RocZintReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z 
= 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilRocZintReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilRocZintReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 10: if (vbs == 0) printf("Optimization level: 10 - RocZint\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/BLOCK_DIMX); dimGrid.y = (int)ceil(dimy/BLOCK_DIMY); dimGrid.z = 1; for (int i = 0; i < t_end; i++) { if (i%2) { calcStencilRocZint <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilRocZint <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; case 11: if (vbs == 0) printf("Optimization level: 11 - ShamRocZintTempReg\n"); dimBlock.x = BLOCK_DIMX; dimBlock.y = BLOCK_DIMY; dimBlock.z = 1; dimGrid.x = (int)ceil(dimx/(BLOCK_DIMX-2*RADIUS)); dimGrid.y = (int)ceil(dimy/(BLOCK_DIMY-2*RADIUS)); dimGrid.z = 1; for (int i = 0; i < t_end/2; i++) { if (i%2) { calcStencilShamRocZintTempReg <<< dimGrid,dimBlock >>> (d_b, d_a, pitchedDimx, dimy, dimz); } else { calcStencilShamRocZintTempReg <<< dimGrid,dimBlock >>> (d_a, d_b, pitchedDimx, dimy, dimz); } wbCheck(cudaGetLastError()); } break; default: printf("Invalid optimization selected\n"); break; } cudaEventRecord(t4); cudaDeviceSynchronize(); if ( (opt == 6) || (opt == 11) ) { if ((t_end/2)%2) { wbCheck(cudaMemcpy(h_a, d_b, gold_size, cudaMemcpyDeviceToHost)); } else { wbCheck(cudaMemcpy(h_a, d_a, gold_size, cudaMemcpyDeviceToHost)); } } else { if (t_end%2) { wbCheck(cudaMemcpy(h_a, d_b, gold_size, cudaMemcpyDeviceToHost)); } else { wbCheck(cudaMemcpy(h_a, d_a, gold_size, cudaMemcpyDeviceToHost)); } } cudaEventRecord(t5); cudaFree(d_a); cudaFree(d_b); #ifdef PRINT_RESULT if ( (opt == 6) || (opt == 11) ) { printMatrixTemporal(h_a,pitchedDimx,dimy,dimz); } else { printMatrix(h_a,pitchedDimx,dimy,dimz); } #endif if (vbs == 0) { if ( (opt == 6) || (opt == 11) ) { if (checkResultTemporal(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } else { if (checkResult(h_a,h_gold_a,pitchedDimx,dimy,dimz)) { printf("Correct results!\n"); } else { printf("Wrong results!!!!!!\n"); } } } cudaEventSynchronize(t5); cudaEventElapsedTime(&init, t0, t1); cudaEventElapsedTime(&host_comp, t1, t2); cudaEventElapsedTime(&host2gpu, t2, t3); cudaEventElapsedTime(&gpu_comp, t3, t4); cudaEventElapsedTime(&gpu2host, t4, t5); cudaEventElapsedTime(&tot, t0, t5); gFlops = (1.0e-6)*flop/gpu_comp; free(h_a); free(h_gold_a); if (vbs == 0) { printf("GPU Clock: %d MHz\n",prop.clockRate/1000); printf("DIM = %dx%dx%d; T_END = %d; BLOCK_WIDTH = %dx%dx%d\n", dimx,dimy,dimz,t_end,BLOCK_DIMX,BLOCK_DIMY,BLOCK_DIMZ); printf("init=%f, host_comp=%f, host2gpu=%f, gpu_comp=%f, gpu2host=%f, tot=%f \n", init, host_comp, host2gpu, gpu_comp, gpu2host, tot); printf("Stencil Throughput: %f Gpts/s\n", (1.0e-6*points)/gpu_comp); // gpu_comp is measured in ms printf("gFlops = %f GFLOPs\n", gFlops); printf("\n"); } else { printf("%d,%d,%d,%f,%f\n", dimx,dimy,dimz,gFlops,gpu_comp); } return 0; }
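/*
 * A minimal sketch (not part of the benchmark above) of the register-queue idea that the
 * ZintReg/RocZintReg kernels build on: one thread owns an (x, y) column and slides a small
 * window of Z slices through registers, so each input value is read from global memory
 * once per thread. Everything here is illustrative -- the kernel name simpleZintReg, the
 * sizes NX/NY/NZ, and the coefficients c0..c2 are assumptions and do not appear in the
 * original sources; the real kernels use RADIUS = 6, X padding, and constant-memory coeff[].
 */
#include <cuda_runtime.h>

#define NX 64
#define NY 64
#define NZ 64

__global__ void simpleZintReg(const float* __restrict__ in, float* __restrict__ out)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x <= 0 || x >= NX - 1 || y <= 0 || y >= NY - 1) return; // skip the XY boundary

    const int stride = NX * NY;        // distance between consecutive Z slices
    const int idx    = y * NX + x;     // offset inside one slice

    // Register queue for a 7-point stencil (radius 1 in Z): behind / current / infront.
    float behind  = in[idx];                 // z = 0
    float current = in[idx + stride];        // z = 1
    float infront = in[idx + 2 * stride];    // z = 2

    const float c0 = 0.5f, c1 = 0.1f, c2 = 0.05f;   // illustrative coefficients

    for (int z = 1; z < NZ - 1; ++z) {
        const int c = idx + z * stride;
        out[c] = c0 * current
               + c1 * (in[c - 1]  + in[c + 1])      // X neighbours
               + c1 * (in[c - NX] + in[c + NX])     // Y neighbours
               + c2 * (behind + infront);           // Z neighbours come from registers
        behind  = current;                          // advance the queue by one slice
        current = infront;
        if (z + 2 < NZ) infront = in[idx + (z + 2) * stride];
    }
}
// Launched, for example, as: simpleZintReg<<<dim3(NX/16, NY/16), dim3(16, 16)>>>(d_in, d_out);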
628ef7dd23aee9790063690f20ca940f876666b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "KinectFusion.h" #undef isnan #undef isfinite #include <iostream> using namespace std; namespace { ///Paper: Interactive Ray Tracing for Isosurface Rendering __device__ __forceinline__ float4 RaycastForPos(const Volume volume, const uint2 pos, const Matrix4f view, ///< camera to world const float near_plane, const float far_plane, const float step, const float large_step) { const float3 origin = make_float3(view(0, 3), view(1, 3), view(2, 3)); const float3 direction = rotate(view, make_float3(pos.x, pos.y, 1)); // intersect ray with a box, compute intersection of ray with six box planes // x = dx*t + px // y = dy*t + py // z = dz*t + pz // t means the length of the ray from origin to (x, y, z) const float3 invR = make_float3(1.0f) / direction; ///< 1 / direction /// make x = 0, then we'll get equation 0 = dx*t + px -> t = - px/dx const float3 bot = -1 * invR * origin; /// make x = dimx, then we'll get equation dimx = dx*t + px -> t = (dimx - px) / dx const float3 top = invR * (volume.m_dim - origin); ///so bot stores length of the ray with (0, 0, 0) endpoint //top stores length of the ray with (dimx, dimy, dimz) endpoint // reorder intersections to find smallest and largest on each axis const float3 tmin = fminf(bot, top); const float3 tmax = fmaxf(bot, top); const float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); const float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); // get the nearest and the most far distance for cull //const float tnear = fmaxf(largest_tmin. near_plane); // why error here? the dot '.' const float tnear = fmaxf(largest_tmin, near_plane); const float tfar = fminf(smallest_tmax, far_plane); if (tnear < tfar) { // first walk with large_step to find a hit float t = tnear; float step_size = large_step; float f_t = volume.interp(origin + direction * t); float f_tt = 0; if (f_t > 0) { for (; t < tfar; t += step_size ) { f_tt = volume.interp(origin + direction * t); if (f_tt < 0) { // hit found break; } if (f_tt < 0.8f) { //change to a smaller step_size step_size = step; } f_t = f_tt; } if (f_tt < 0) { // hit found t = t + step_size * f_tt / (f_t - f_tt); // interpolation return make_float4(origin + direction * t, t); /// x = px + dx * t } } } return make_float4(0); } inline __device__ float sqr(const float x) { return x * x; } // TODO(): shared memory acceleration __global__ void BilateralFilterKernel( Image<float> out, const Image<float> in, const Image<float> gaussian, const float sigma, //sigma for illuminance const int radius ) { const uint2 pix = thr2pos2(); if (in[pix] == 0) { out[pix] = 0; return; } float sum = 0; float t = 0; const float center = in[pix]; for (int i = -radius; i <= radius; i++) { for (int j = - radius; j <= radius; j++) { // const float pixel = in[make_uint2(clamp(pix.x + i, 0u, in.m_size.x - 1), clamp(pix.y + j, 0u, in.m_size.y - 1))]; const float pixel = in[clamp( make_uint2(pix.x + i, pix.y + j), make_uint2(0u, 0u), make_uint2(in.m_size.x - 1, in.m_size.y - 1) )]; if (pixel > 0) { const float mod = sqr(pixel - center); const float factor = gaussian[make_uint2(i + radius, 0)] * gaussian[make_uint2(j + radius, 0)] * __expf(-mod / (2 * sqr(sigma))); t += factor * pixel; sum += factor; } } } out[pix] = t / sum; } __global__ void HalfSampleRobustKernel( Image<float> out, const Image<float> in, const float robus_threshold, const int radius ) { const uint2 pix = thr2pos2(); const uint2 center_pix = 2 * pix; if 
(pix.x >= out.m_size.x || pix.y >= out.m_size.y) { return; } float sum = 0; float t = 0; const float center_pixel = in[center_pix]; for (int i = -radius + 1; i <= radius; i++) { for (int j = - radius + 1; j <= radius; j++) { const uint2 pix_in = clamp( make_uint2(center_pix.x + i, center_pix.y + j), make_uint2(0u, 0u), make_uint2(in.m_size.x - 1, in.m_size.y - 1) ); const float pixel = in[pix_in]; if (fabsf(pixel - center_pixel) < robus_threshold) { //accept illuminance within neighberhood in threshold sum += 1; t += pixel; } } } out[pix] = t / sum; } // gaussian for space difference __global__ void GenerateGaussianKernel(Image<float> out, float sigma, int radius) { int x = threadIdx.x - radius; out[make_uint2(threadIdx.x, 0)] = __expf(-sqr(x) / (2 * sqr(sigma))); } __global__ void Depth2VertexKernel(Image<float3> vertex, const Image<float> depth, const Matrix3f invK) { const uint2 pix = thr2pos2(); if (pix.x >= depth.m_size.x || pix.y >= depth.m_size.y) { return; } if (depth[pix] > 0) { vertex[pix] = depth[pix] * (invK * make_float3(pix.x, pix.y, 1)); } else { vertex[pix] = make_float3(0); } } __global__ void Vertex2NormalKernel(Image<float3> normal, const Image<float3> vertex) { const uint2 pix = thr2pos2(); if (pix.x >= vertex.m_size.x || pix.y >= vertex.m_size.y) { return; } const float3 left = vertex[make_uint2(max(static_cast<int>(pix.x) - 1, 0), pix.y)]; const float3 right = vertex[make_uint2(min(pix.x + 1, vertex.m_size.x - 1), pix.y)]; const float3 up = vertex[make_uint2(pix.x, max(static_cast<int>(pix.y) - 1, 0))]; const float3 down = vertex[make_uint2(pix.x, min(pix.y + 1, vertex.m_size.y - 1))]; if (left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) { normal[pix].x = INVALID; return; } const float3 dxv = right - left; const float3 dyv = down - up; normal[pix] = normalize(cross(dyv, dxv)); } /*! 
* f = sqr(n^T(p - p')) * e = n^T(p - p') * r = p - p' * J_r = ( I, -p'^ ) * J_e = n^TJ_r = n^T( I, -p'^ ) */ __global__ void TrackKernel( Image<TrackData> out, const Image<float3> input_vertex, const Image<float3> input_normal, const Image<float3> ref_vertex, const Image<float3> ref_normal, const Matrix4f track_pose, const Matrix4f K_inv_raycast_pose, const float dist_threshold, const float normal_threshold ) { const uint2 pix = thr2pos2(); if (pix.x >= input_vertex.m_size.x || pix.y >= input_vertex.m_size.y) { return; } TrackData & row = out[pix]; if (input_normal[pix].x == INVALID) { row.result = -1; return; } // find corresponding vertex from input_vertex to ref_vertex // project vertex from current camera to world const float3 projected_vertex = transform(track_pose, input_vertex[pix]); // project vertex from world to image with raycast pose const float3 projected_pos = transform(K_inv_raycast_pose, projected_vertex); // get image position const float2 projected_pix = make_float2( projected_pos.x / projected_pos.z + 0.5f, projected_pos.y / projected_pos.z + 0.5f ); // out of border if (projected_pix.x < 0 || projected_pix.x >= ref_vertex.m_size.x || projected_pix.y < 0 || projected_pix.y >= ref_vertex.m_size.y) { row.result = -2; return; } const uint2 ref_pix = make_uint2(projected_pix.x, projected_pix.y); const float3 ref_normal_pixel = ref_normal[ref_pix]; if (ref_normal_pixel.x == INVALID) { row.result = -3; return; } // Euclidean difference const float3 diff = ref_vertex[ref_pix] - projected_vertex; // project normal from current camera to world const float3 projected_normal = rotate(track_pose, input_normal[pix]); // outlier if (length(diff) > dist_threshold) { row.result = -4; return; } //TODO: it seems that projected_normal is in world, but ref_normal is in raycast camera //if the normal is figured out from TSDF, it's OK. if (dot(projected_normal, ref_normal_pixel) < normal_threshold) { row.result = -5; return; } row.result = 1; row.error = dot(ref_normal_pixel, diff); reinterpret_cast<float3 *>(row.J)[0] = ref_normal_pixel; // N reinterpret_cast<float3 *>(row.J)[1] = cross(projected_vertex, ref_normal_pixel); // N dot (-p'^) } //TODO: it can be accelerated by reduce primitive __global__ void ReduceKernel(float * out, const Image<TrackData> J, const uint2 size){ __shared__ float S[112][32]; // this is for the final accumulation const uint sline = threadIdx.x; float sums[32]; // 1 for error, 21 for jtj, 6 for jte, 4 for vertex info float * jtj = sums + 7; float * info = sums + 28; for(uint i = 0; i < 32; ++i) sums[i] = 0; for(uint y = blockIdx.x; y < size.y; y += gridDim.x){ for(uint x = sline; x < size.x; x += blockDim.x ){ const TrackData & row = J[make_uint2(x, y)]; if(row.result < 1){ info[1] += row.result == -4 ? 1 : 0; info[2] += row.result == -5 ? 1 : 0; info[3] += row.result > -4 ? 1 : 0; continue; } // Error part sums[0] += row.error * row.error; // JTe part for(int i = 0; i < 6; ++i) sums[i+1] += row.error * row.J[i]; // JTJ part, unfortunatly the double loop is not unrolled well... 
jtj[0] += row.J[0] * row.J[0]; jtj[1] += row.J[0] * row.J[1]; jtj[2] += row.J[0] * row.J[2]; jtj[3] += row.J[0] * row.J[3]; jtj[4] += row.J[0] * row.J[4]; jtj[5] += row.J[0] * row.J[5]; jtj[6] += row.J[1] * row.J[1]; jtj[7] += row.J[1] * row.J[2]; jtj[8] += row.J[1] * row.J[3]; jtj[9] += row.J[1] * row.J[4]; jtj[10] += row.J[1] * row.J[5]; jtj[11] += row.J[2] * row.J[2]; jtj[12] += row.J[2] * row.J[3]; jtj[13] += row.J[2] * row.J[4]; jtj[14] += row.J[2] * row.J[5]; jtj[15] += row.J[3] * row.J[3]; jtj[16] += row.J[3] * row.J[4]; jtj[17] += row.J[3] * row.J[5]; jtj[18] += row.J[4] * row.J[4]; jtj[19] += row.J[4] * row.J[5]; jtj[20] += row.J[5] * row.J[5]; // extra info here info[0] += 1; } } for(int i = 0; i < 32; ++i) // copy over to shared memory S[sline][i] = sums[i]; __syncthreads(); // wait for everyone to finish if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads for(unsigned i = 1; i < blockDim.x; ++i) S[0][sline] += S[i][sline]; out[sline+blockIdx.x*32] = S[0][sline]; } } __global__ void IntegrateKernel(Volume volume, const Image<float> depth, const Matrix4f inv_track, const Matrix3f K, const float mu, const float max_weight ) { uint3 pix = make_uint3(thr2pos2()); //'auto' inference will result in deadly memory illegal access! float3 pos = transform(inv_track, volume.pos(pix)); float3 cameraX = K * pos; // (u, v, 0) in image coordinate // each increment `delta` is a voxel on z-axis in fact. const float3 delta = rotate(inv_track, make_float3(0, 0, volume.m_dim.z / volume.m_size.z)); const float3 camera_delta = K * delta; // each thread manipulate a column for (pix.z = 0; pix.z < volume.m_size.z; pix.z++, pos += delta, cameraX += camera_delta) { if (pos.z < 0.001f) { continue; } //project the voxel on image plane //why external 0.5? 
const float2 pixel = make_float2(cameraX.x / cameraX.z + 0.5f, cameraX.y / cameraX.z + 0.5f); if (pixel.x < 0 || pixel.x >= depth.m_size.x || pixel.y < 0 || pixel.y >= depth.m_size.y) { continue; } const uint2 px = make_uint2(pixel.x , pixel.y); if (depth[px] == 0) { continue; } const float diff = (depth[px] - cameraX.z) * (1 +sqr(pos.x / pos.z) + sqr(pos.y / pos.z)); //depth on image is vertical depth, so we use Z rather that length of ray // scalar = (sqr(x) + sqr(y) + sqr(z))/sqr(z) // diff = distance * sqr(atan(theta)) - a approximation of projective diff //update the voxel when depth difference less than threshold if (diff > -mu) { const float sdf = fminf(1.0f, diff/mu); float2 data = volume[pix]; //x = tsdf, y = weight data.x = clamp((data.y * data.x + sdf) / (data.y + 1), -1.0f, 1.0f); data.y = fminf(data.y + 1, max_weight); volume.set(pix, data); } } } } /// this kernel gives an externel depth map for visualization __global__ void RaycastDepthImageKernel( Image<float3> pos3D, // Image<float3> normal, Image<float> depth, Volume volume, Matrix4f view, float near_plane, float far_plane, float step, float large_step) { const auto pos = thr2pos2(); const float4 hit = RaycastForPos( volume, pos, view, near_plane, far_plane, step, large_step ); if (hit.w > 0) { pos3D[pos] = make_float3(hit); depth[pos] = hit.w; } else { pos3D[pos] = make_float3(0); depth[pos] = 0; } } __global__ void RaycastWithNormalKernel( Image<float3> pos3D, Image<float3> normal, Volume volume, Matrix4f view, float near_plane, float far_plane, float step, float large_step) { const auto pos = thr2pos2(); const float4 hit = RaycastForPos( volume, pos, view, near_plane, far_plane, step, large_step ); if (hit.w > 0) { pos3D[pos] = make_float3(hit); float3 surf_normal = volume.grad(make_float3(hit)); if (length(surf_normal) == 0) { normal[pos].x = INVALID; } else { normal[pos] = normalize(surf_normal); } } else { pos3D[pos] = make_float3(0); normal[pos] = make_float3(INVALID, 0, 0); } } KinectFusion::KinectFusion(const Parameters& parameters): m_parameters(parameters) { m_volume.Init(parameters.VolumeSize, parameters.VolumeDimensions); // m_volume.SetBoxWrap(make_float3(0.1f, 0.1f, 0.8f), make_float3(0.9f, 0.9f, 0.9f), -1.0f); // m_volume.SetBoxWrap(make_float3(0.1f, 0.8f, 0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f); // m_volume.SetBoxWrap(make_float3(0.8f, 0.1f, 0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f); hipSetDeviceFlags(hipDeviceMapHost); m_output.Allocate(parameters.InputSize); m_ba_values.Allocate(make_uint2(32, 8)); m_reduction.Allocate(parameters.InputSize); m_raw_depth.Allocate(parameters.InputSize); m_vertex.Allocate(parameters.InputSize); m_normal.Allocate(parameters.InputSize); m_cameraK = m_parameters.CameraK; m_input_depth.resize(parameters.ICPLevels); m_input_vertex.resize(parameters.ICPLevels); m_input_normal.resize(parameters.ICPLevels); m_inv_cameraKs.resize(parameters.ICPLevels); for (auto i = 0; i < parameters.ICPLevels; i++) { m_input_depth[i].Allocate(parameters.InputSize >> i); m_input_vertex[i].Allocate(parameters.InputSize >> i); m_input_normal[i].Allocate(parameters.InputSize >> i); m_inv_cameraKs[i] << (1 << i) / m_cameraK(0, 0) , 0, - m_cameraK(0, 2) / m_cameraK(0, 0), 0, (1 << i) / m_cameraK(1, 1), - m_cameraK(1, 2)/ m_cameraK(1, 1), 0, 0, 1; } m_gaussian.Allocate(make_uint2(parameters.GaussianRadius * 2 + 1, 1)); hipLaunchKernelGGL(( GenerateGaussianKernel), dim3(1), dim3(m_gaussian.m_size.x), 0, 0, m_gaussian, parameters.GaussianFunctionSigma, parameters.GaussianRadius ); } void 
KinectFusion::Raycast() { m_raycast_pose = m_pose; dim3 block(16, 16); std::cout << "near plane = " << m_parameters.NearPlane << std::endl; std::cout << "far plane = " << m_parameters.FarPlane << std::endl; std::cout << "step size = " << StepSize() << std::endl; std::cout << "large step size = " << LargeStepSize() << std::endl; std::cout << "Pose = " << std::endl; Matrix4f m = Matrix4f::Identity(); m(0, 0) = m_cameraK(0, 0); m(1, 1) = m_cameraK(1, 1); m(0, 2) = m_cameraK(0, 2); m(1, 2) = m_cameraK(1, 2); m = m_raycast_pose * m.inverse(); std::cout << m << std::endl; hipLaunchKernelGGL(( RaycastDepthImageKernel), dim3(divup(m_parameters.InputSize, block)), dim3(block), 0, 0, m_vertex, m_output.GetDeviceImage(), m_volume, m, m_parameters.NearPlane, m_parameters.FarPlane, StepSize(), LargeStepSize() ); hipLaunchKernelGGL(( RaycastWithNormalKernel), dim3(divup(m_parameters.InputSize, block)), dim3(block), 0, 0, m_vertex, m_normal, m_volume, m, m_parameters.NearPlane, m_parameters.FarPlane, StepSize(), LargeStepSize() ); } void KinectFusion::Integrate() { Matrix4f inverse_pose = m_pose.inverse(); std::cout << "inverse pose:" << std::endl << inverse_pose << std::endl; hipLaunchKernelGGL(( IntegrateKernel), dim3(divup(dim3(m_volume.m_size.x, m_volume.m_size.y), m_parameters.ImageBlock)), dim3(m_parameters.ImageBlock), 0, 0, m_volume, m_raw_depth, inverse_pose, m_parameters.CameraK, m_parameters.FusionThreshold, m_parameters.MaxWeight ); } /*! * 1.bilateral filter and down sample(coarse to fine) * 2.Coarse to fine iteration: * 1.compute vertex and normal from raycast * 2.compute jaccobian * 3.reduce jaccobian and solve linear equation * 4.update pose, goto 1(optimize iteration) */ bool KinectFusion::Track() { std::vector<dim3> grids; for (auto i = 0; i < m_parameters.ICPLevels; i++) { grids.push_back(divup(m_parameters.InputSize >> i, m_parameters.ImageBlock)); } // bilateral filter hipLaunchKernelGGL(( BilateralFilterKernel), dim3(grids[0]), dim3(m_parameters.ImageBlock), 0, 0, m_input_depth[0], m_raw_depth, m_gaussian, m_parameters.GaussianIlluminanceSigma, m_parameters.GaussianRadius ); // downsample std::cout << "Gausian ill = " << m_parameters.GaussianIlluminanceSigma << std::endl; std::cout << "Gausian Radius = " << m_parameters.GaussianRadius << std::endl; for (auto i = 1; i < m_parameters.ICPLevels; i++) { hipLaunchKernelGGL(( HalfSampleRobustKernel), dim3(grids[i]), dim3(m_parameters.ImageBlock), 0, 0, m_input_depth[i], m_input_depth[i-1], m_parameters.GaussianIlluminanceSigma * 3, 1 ); } for (auto itr = 0; itr < m_parameters.ICPLevels; itr++) { hipLaunchKernelGGL(( Depth2VertexKernel), dim3(grids[itr]), dim3(m_parameters.ImageBlock), 0, 0, m_input_vertex[itr], m_input_depth[itr], m_inv_cameraKs[itr] ); hipLaunchKernelGGL(( Vertex2NormalKernel), dim3(grids[itr]), dim3(m_parameters.ImageBlock), 0, 0, m_input_normal[itr], m_input_vertex[itr] ); } const Matrix4f old_pose = m_pose; const Matrix4f inv_raycast_pose = m_raycast_pose.inverse(); const Matrix4f project_ref = combine_intrinsics(m_cameraK, inv_raycast_pose); auto values = Eigen::Map<Eigen::Matrix<float, 8, 32, Eigen::RowMajor>>(m_ba_values.Data()); std::cout << "---------------------------------" << std::endl; std::cout << "ICP Track : " << std::endl; for (auto level = m_parameters.ICPLevels - 1; level >= 0; level--) { std::cout << "level " << level << "::::" << std::endl; for (auto itr = 0; itr < m_parameters.ICPIterationTimes[level]; itr++) { std::cout << "itr " << itr << ": "; hipLaunchKernelGGL(( TrackKernel), dim3(grids[level]), 
dim3(m_parameters.ImageBlock), 0, 0, m_reduction, m_input_vertex[level], m_input_normal[level], m_vertex, m_normal, m_pose, project_ref, 0.2f, // dist_threshold 0.8f // normal_threshold ); hipLaunchKernelGGL(( ReduceKernel), dim3(8), dim3(112), 0, 0, m_ba_values.GetDeviceImage().Data(), m_reduction, m_input_vertex[level].m_size ); hipDeviceSynchronize(); // synchronize so the host-mapped (pinned) reduction buffer is up to date // solve linear equation Vector32f v = values.colwise().sum(); std::cout << "inlier pts = " << v(28) << std::endl; Vector6f delta_se3x = solve(Vector27f(v.segment(1, 27))); // SE3 transform std::cout << "exp pose = " << std::endl << exp(delta_se3x) << std::endl; m_pose = exp(delta_se3x) * m_pose; std::cout << "err = " << v(0) << std::endl; std::cout << "delta_se3 = " << delta_se3x << std::endl; if (delta_se3x.norm() < 1e-5) { std::cout << "Converged early, stopping iterations at this level" << std::endl; break; } } } Vector32f v = values.colwise().sum(); if ((sqrt(v(0) / v(28)) > 2e-2) || (v(28)) / (m_raw_depth.m_size.x * m_raw_depth.m_size.y) < 0.15f) { std::cout << "Don't update pose" << std::endl; m_pose = old_pose; return false; } return true; }
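The 32 floats that ReduceKernel above writes per block pack one squared-error term, six JTe entries, twenty-one upper-triangular JTJ entries and four counters; Track() then calls solve() on the 27 values following the error term. The helper below is a minimal host-side sketch of that unpacking step, assuming Eigen (already used by this file); SolveNormalEquationsSketch and its sign convention are illustrative, not the project's actual solve() implementation.

// Hypothetical helper, not part of the file above: rebuilds the 6x6 Gauss-Newton
// normal equations from the 27 reduced values (6 JTe entries followed by the 21
// row-major upper-triangular JTJ entries) and solves them with Eigen's LDLT.
#include <Eigen/Dense>

inline Eigen::Matrix<float, 6, 1> SolveNormalEquationsSketch(
    const Eigen::Matrix<float, 27, 1>& v) {
  Eigen::Matrix<float, 6, 1> b = v.head<6>();   // JTe = sum_i e_i * J_i
  Eigen::Matrix<float, 6, 6> A;                 // JTJ = sum_i J_i^T J_i
  int k = 6;
  for (int r = 0; r < 6; ++r) {
    for (int c = r; c < 6; ++c) {
      A(r, c) = v(k);
      A(c, r) = v(k);  // mirror into the lower triangle, A is symmetric
      ++k;
    }
  }
  // One Gauss-Newton step solves A * dx = b; whether dx or -dx is applied
  // depends on how the original solve() defines the residual.
  return A.ldlt().solve(b);
}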
628ef7dd23aee9790063690f20ca940f876666b3.cu
#include "KinectFusion.h" #undef isnan #undef isfinite #include <iostream> using namespace std; namespace { ///Paper: Interactive Ray Tracing for Isosurface Rendering __device__ __forceinline__ float4 RaycastForPos(const Volume volume, const uint2 pos, const Matrix4f view, ///< camera to world const float near_plane, const float far_plane, const float step, const float large_step) { const float3 origin = make_float3(view(0, 3), view(1, 3), view(2, 3)); const float3 direction = rotate(view, make_float3(pos.x, pos.y, 1)); // intersect ray with a box, compute intersection of ray with six box planes // x = dx*t + px // y = dy*t + py // z = dz*t + pz // t means the length of the ray from origin to (x, y, z) const float3 invR = make_float3(1.0f) / direction; ///< 1 / direction /// make x = 0, then we'll get equation 0 = dx*t + px -> t = - px/dx const float3 bot = -1 * invR * origin; /// make x = dimx, then we'll get equation dimx = dx*t + px -> t = (dimx - px) / dx const float3 top = invR * (volume.m_dim - origin); ///so bot stores length of the ray with (0, 0, 0) endpoint //top stores length of the ray with (dimx, dimy, dimz) endpoint // reorder intersections to find smallest and largest on each axis const float3 tmin = fminf(bot, top); const float3 tmax = fmaxf(bot, top); const float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); const float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); // get the nearest and the most far distance for cull //const float tnear = fmaxf(largest_tmin. near_plane); // why error here? the dot '.' const float tnear = fmaxf(largest_tmin, near_plane); const float tfar = fminf(smallest_tmax, far_plane); if (tnear < tfar) { // first walk with large_step to find a hit float t = tnear; float step_size = large_step; float f_t = volume.interp(origin + direction * t); float f_tt = 0; if (f_t > 0) { for (; t < tfar; t += step_size ) { f_tt = volume.interp(origin + direction * t); if (f_tt < 0) { // hit found break; } if (f_tt < 0.8f) { //change to a smaller step_size step_size = step; } f_t = f_tt; } if (f_tt < 0) { // hit found t = t + step_size * f_tt / (f_t - f_tt); // interpolation return make_float4(origin + direction * t, t); /// x = px + dx * t } } } return make_float4(0); } inline __device__ float sqr(const float x) { return x * x; } // TODO(): shared memory acceleration __global__ void BilateralFilterKernel( Image<float> out, const Image<float> in, const Image<float> gaussian, const float sigma, //sigma for illuminance const int radius ) { const uint2 pix = thr2pos2(); if (in[pix] == 0) { out[pix] = 0; return; } float sum = 0; float t = 0; const float center = in[pix]; for (int i = -radius; i <= radius; i++) { for (int j = - radius; j <= radius; j++) { // const float pixel = in[make_uint2(clamp(pix.x + i, 0u, in.m_size.x - 1), clamp(pix.y + j, 0u, in.m_size.y - 1))]; const float pixel = in[clamp( make_uint2(pix.x + i, pix.y + j), make_uint2(0u, 0u), make_uint2(in.m_size.x - 1, in.m_size.y - 1) )]; if (pixel > 0) { const float mod = sqr(pixel - center); const float factor = gaussian[make_uint2(i + radius, 0)] * gaussian[make_uint2(j + radius, 0)] * __expf(-mod / (2 * sqr(sigma))); t += factor * pixel; sum += factor; } } } out[pix] = t / sum; } __global__ void HalfSampleRobustKernel( Image<float> out, const Image<float> in, const float robus_threshold, const int radius ) { const uint2 pix = thr2pos2(); const uint2 center_pix = 2 * pix; if (pix.x >= out.m_size.x || pix.y >= out.m_size.y) { return; } float sum = 0; float t = 0; 
const float center_pixel = in[center_pix]; for (int i = -radius + 1; i <= radius; i++) { for (int j = - radius + 1; j <= radius; j++) { const uint2 pix_in = clamp( make_uint2(center_pix.x + i, center_pix.y + j), make_uint2(0u, 0u), make_uint2(in.m_size.x - 1, in.m_size.y - 1) ); const float pixel = in[pix_in]; if (fabsf(pixel - center_pixel) < robus_threshold) { //accept illuminance within neighberhood in threshold sum += 1; t += pixel; } } } out[pix] = t / sum; } // gaussian for space difference __global__ void GenerateGaussianKernel(Image<float> out, float sigma, int radius) { int x = threadIdx.x - radius; out[make_uint2(threadIdx.x, 0)] = __expf(-sqr(x) / (2 * sqr(sigma))); } __global__ void Depth2VertexKernel(Image<float3> vertex, const Image<float> depth, const Matrix3f invK) { const uint2 pix = thr2pos2(); if (pix.x >= depth.m_size.x || pix.y >= depth.m_size.y) { return; } if (depth[pix] > 0) { vertex[pix] = depth[pix] * (invK * make_float3(pix.x, pix.y, 1)); } else { vertex[pix] = make_float3(0); } } __global__ void Vertex2NormalKernel(Image<float3> normal, const Image<float3> vertex) { const uint2 pix = thr2pos2(); if (pix.x >= vertex.m_size.x || pix.y >= vertex.m_size.y) { return; } const float3 left = vertex[make_uint2(max(static_cast<int>(pix.x) - 1, 0), pix.y)]; const float3 right = vertex[make_uint2(min(pix.x + 1, vertex.m_size.x - 1), pix.y)]; const float3 up = vertex[make_uint2(pix.x, max(static_cast<int>(pix.y) - 1, 0))]; const float3 down = vertex[make_uint2(pix.x, min(pix.y + 1, vertex.m_size.y - 1))]; if (left.z == 0 || right.z == 0 || up.z == 0 || down.z == 0) { normal[pix].x = INVALID; return; } const float3 dxv = right - left; const float3 dyv = down - up; normal[pix] = normalize(cross(dyv, dxv)); } /*! * f = sqr(n^T(p - p')) * e = n^T(p - p') * r = p - p' * J_r = ( I, -p'^ ) * J_e = n^TJ_r = n^T( I, -p'^ ) */ __global__ void TrackKernel( Image<TrackData> out, const Image<float3> input_vertex, const Image<float3> input_normal, const Image<float3> ref_vertex, const Image<float3> ref_normal, const Matrix4f track_pose, const Matrix4f K_inv_raycast_pose, const float dist_threshold, const float normal_threshold ) { const uint2 pix = thr2pos2(); if (pix.x >= input_vertex.m_size.x || pix.y >= input_vertex.m_size.y) { return; } TrackData & row = out[pix]; if (input_normal[pix].x == INVALID) { row.result = -1; return; } // find corresponding vertex from input_vertex to ref_vertex // project vertex from current camera to world const float3 projected_vertex = transform(track_pose, input_vertex[pix]); // project vertex from world to image with raycast pose const float3 projected_pos = transform(K_inv_raycast_pose, projected_vertex); // get image position const float2 projected_pix = make_float2( projected_pos.x / projected_pos.z + 0.5f, projected_pos.y / projected_pos.z + 0.5f ); // out of border if (projected_pix.x < 0 || projected_pix.x >= ref_vertex.m_size.x || projected_pix.y < 0 || projected_pix.y >= ref_vertex.m_size.y) { row.result = -2; return; } const uint2 ref_pix = make_uint2(projected_pix.x, projected_pix.y); const float3 ref_normal_pixel = ref_normal[ref_pix]; if (ref_normal_pixel.x == INVALID) { row.result = -3; return; } // Euclidean difference const float3 diff = ref_vertex[ref_pix] - projected_vertex; // project normal from current camera to world const float3 projected_normal = rotate(track_pose, input_normal[pix]); // outlier if (length(diff) > dist_threshold) { row.result = -4; return; } //TODO: it seems that projected_normal is in world, but ref_normal is in 
raycast camera //if the normal is figured out from TSDF, it's OK. if (dot(projected_normal, ref_normal_pixel) < normal_threshold) { row.result = -5; return; } row.result = 1; row.error = dot(ref_normal_pixel, diff); reinterpret_cast<float3 *>(row.J)[0] = ref_normal_pixel; // N reinterpret_cast<float3 *>(row.J)[1] = cross(projected_vertex, ref_normal_pixel); // N dot (-p'^) } //TODO: it can be accelerated by reduce primitive __global__ void ReduceKernel(float * out, const Image<TrackData> J, const uint2 size){ __shared__ float S[112][32]; // this is for the final accumulation const uint sline = threadIdx.x; float sums[32]; // 1 for error, 21 for jtj, 6 for jte, 4 for vertex info float * jtj = sums + 7; float * info = sums + 28; for(uint i = 0; i < 32; ++i) sums[i] = 0; for(uint y = blockIdx.x; y < size.y; y += gridDim.x){ for(uint x = sline; x < size.x; x += blockDim.x ){ const TrackData & row = J[make_uint2(x, y)]; if(row.result < 1){ info[1] += row.result == -4 ? 1 : 0; info[2] += row.result == -5 ? 1 : 0; info[3] += row.result > -4 ? 1 : 0; continue; } // Error part sums[0] += row.error * row.error; // JTe part for(int i = 0; i < 6; ++i) sums[i+1] += row.error * row.J[i]; // JTJ part, unfortunatly the double loop is not unrolled well... jtj[0] += row.J[0] * row.J[0]; jtj[1] += row.J[0] * row.J[1]; jtj[2] += row.J[0] * row.J[2]; jtj[3] += row.J[0] * row.J[3]; jtj[4] += row.J[0] * row.J[4]; jtj[5] += row.J[0] * row.J[5]; jtj[6] += row.J[1] * row.J[1]; jtj[7] += row.J[1] * row.J[2]; jtj[8] += row.J[1] * row.J[3]; jtj[9] += row.J[1] * row.J[4]; jtj[10] += row.J[1] * row.J[5]; jtj[11] += row.J[2] * row.J[2]; jtj[12] += row.J[2] * row.J[3]; jtj[13] += row.J[2] * row.J[4]; jtj[14] += row.J[2] * row.J[5]; jtj[15] += row.J[3] * row.J[3]; jtj[16] += row.J[3] * row.J[4]; jtj[17] += row.J[3] * row.J[5]; jtj[18] += row.J[4] * row.J[4]; jtj[19] += row.J[4] * row.J[5]; jtj[20] += row.J[5] * row.J[5]; // extra info here info[0] += 1; } } for(int i = 0; i < 32; ++i) // copy over to shared memory S[sline][i] = sums[i]; __syncthreads(); // wait for everyone to finish if(sline < 32){ // sum up columns and copy to global memory in the final 32 threads for(unsigned i = 1; i < blockDim.x; ++i) S[0][sline] += S[i][sline]; out[sline+blockIdx.x*32] = S[0][sline]; } } __global__ void IntegrateKernel(Volume volume, const Image<float> depth, const Matrix4f inv_track, const Matrix3f K, const float mu, const float max_weight ) { uint3 pix = make_uint3(thr2pos2()); //'auto' inference will result in deadly memory illegal access! float3 pos = transform(inv_track, volume.pos(pix)); float3 cameraX = K * pos; // (u, v, 0) in image coordinate // each increment `delta` is a voxel on z-axis in fact. const float3 delta = rotate(inv_track, make_float3(0, 0, volume.m_dim.z / volume.m_size.z)); const float3 camera_delta = K * delta; // each thread manipulate a column for (pix.z = 0; pix.z < volume.m_size.z; pix.z++, pos += delta, cameraX += camera_delta) { if (pos.z < 0.001f) { continue; } //project the voxel on image plane //why external 0.5? 
const float2 pixel = make_float2(cameraX.x / cameraX.z + 0.5f, cameraX.y / cameraX.z + 0.5f); if (pixel.x < 0 || pixel.x >= depth.m_size.x || pixel.y < 0 || pixel.y >= depth.m_size.y) { continue; } const uint2 px = make_uint2(pixel.x , pixel.y); if (depth[px] == 0) { continue; } const float diff = (depth[px] - cameraX.z) * (1 +sqr(pos.x / pos.z) + sqr(pos.y / pos.z)); //depth on image is vertical depth, so we use Z rather that length of ray // scalar = (sqr(x) + sqr(y) + sqr(z))/sqr(z) // diff = distance * sqr(atan(theta)) - a approximation of projective diff //update the voxel when depth difference less than threshold if (diff > -mu) { const float sdf = fminf(1.0f, diff/mu); float2 data = volume[pix]; //x = tsdf, y = weight data.x = clamp((data.y * data.x + sdf) / (data.y + 1), -1.0f, 1.0f); data.y = fminf(data.y + 1, max_weight); volume.set(pix, data); } } } } /// this kernel gives an externel depth map for visualization __global__ void RaycastDepthImageKernel( Image<float3> pos3D, // Image<float3> normal, Image<float> depth, Volume volume, Matrix4f view, float near_plane, float far_plane, float step, float large_step) { const auto pos = thr2pos2(); const float4 hit = RaycastForPos( volume, pos, view, near_plane, far_plane, step, large_step ); if (hit.w > 0) { pos3D[pos] = make_float3(hit); depth[pos] = hit.w; } else { pos3D[pos] = make_float3(0); depth[pos] = 0; } } __global__ void RaycastWithNormalKernel( Image<float3> pos3D, Image<float3> normal, Volume volume, Matrix4f view, float near_plane, float far_plane, float step, float large_step) { const auto pos = thr2pos2(); const float4 hit = RaycastForPos( volume, pos, view, near_plane, far_plane, step, large_step ); if (hit.w > 0) { pos3D[pos] = make_float3(hit); float3 surf_normal = volume.grad(make_float3(hit)); if (length(surf_normal) == 0) { normal[pos].x = INVALID; } else { normal[pos] = normalize(surf_normal); } } else { pos3D[pos] = make_float3(0); normal[pos] = make_float3(INVALID, 0, 0); } } KinectFusion::KinectFusion(const Parameters& parameters): m_parameters(parameters) { m_volume.Init(parameters.VolumeSize, parameters.VolumeDimensions); // m_volume.SetBoxWrap(make_float3(0.1f, 0.1f, 0.8f), make_float3(0.9f, 0.9f, 0.9f), -1.0f); // m_volume.SetBoxWrap(make_float3(0.1f, 0.8f, 0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f); // m_volume.SetBoxWrap(make_float3(0.8f, 0.1f, 0.1f), make_float3(0.9f, 0.9f, 0.9f), -1.0f); cudaSetDeviceFlags(cudaDeviceMapHost); m_output.Allocate(parameters.InputSize); m_ba_values.Allocate(make_uint2(32, 8)); m_reduction.Allocate(parameters.InputSize); m_raw_depth.Allocate(parameters.InputSize); m_vertex.Allocate(parameters.InputSize); m_normal.Allocate(parameters.InputSize); m_cameraK = m_parameters.CameraK; m_input_depth.resize(parameters.ICPLevels); m_input_vertex.resize(parameters.ICPLevels); m_input_normal.resize(parameters.ICPLevels); m_inv_cameraKs.resize(parameters.ICPLevels); for (auto i = 0; i < parameters.ICPLevels; i++) { m_input_depth[i].Allocate(parameters.InputSize >> i); m_input_vertex[i].Allocate(parameters.InputSize >> i); m_input_normal[i].Allocate(parameters.InputSize >> i); m_inv_cameraKs[i] << (1 << i) / m_cameraK(0, 0) , 0, - m_cameraK(0, 2) / m_cameraK(0, 0), 0, (1 << i) / m_cameraK(1, 1), - m_cameraK(1, 2)/ m_cameraK(1, 1), 0, 0, 1; } m_gaussian.Allocate(make_uint2(parameters.GaussianRadius * 2 + 1, 1)); GenerateGaussianKernel<<<1, m_gaussian.m_size.x>>>( m_gaussian, parameters.GaussianFunctionSigma, parameters.GaussianRadius ); } void KinectFusion::Raycast() { m_raycast_pose = 
m_pose; dim3 block(16, 16); std::cout << "near plane = " << m_parameters.NearPlane << std::endl; std::cout << "far plane = " << m_parameters.FarPlane << std::endl; std::cout << "step size = " << StepSize() << std::endl; std::cout << "large step size = " << LargeStepSize() << std::endl; std::cout << "Pose = " << std::endl; Matrix4f m = Matrix4f::Identity(); m(0, 0) = m_cameraK(0, 0); m(1, 1) = m_cameraK(1, 1); m(0, 2) = m_cameraK(0, 2); m(1, 2) = m_cameraK(1, 2); m = m_raycast_pose * m.inverse(); std::cout << m << std::endl; RaycastDepthImageKernel<<<divup(m_parameters.InputSize, block), block>>>( m_vertex, m_output.GetDeviceImage(), m_volume, m, m_parameters.NearPlane, m_parameters.FarPlane, StepSize(), LargeStepSize() ); RaycastWithNormalKernel<<<divup(m_parameters.InputSize, block), block>>>( m_vertex, m_normal, m_volume, m, m_parameters.NearPlane, m_parameters.FarPlane, StepSize(), LargeStepSize() ); } void KinectFusion::Integrate() { Matrix4f inverse_pose = m_pose.inverse(); std::cout << "inverse pose:" << std::endl << inverse_pose << std::endl; IntegrateKernel<<<divup(dim3(m_volume.m_size.x, m_volume.m_size.y), m_parameters.ImageBlock), m_parameters.ImageBlock>>>( m_volume, m_raw_depth, inverse_pose, m_parameters.CameraK, m_parameters.FusionThreshold, m_parameters.MaxWeight ); } /*! * 1.bilateral filter and down sample(coarse to fine) * 2.Coarse to fine iteration: * 1.compute vertex and normal from raycast * 2.compute jaccobian * 3.reduce jaccobian and solve linear equation * 4.update pose, goto 1(optimize iteration) */ bool KinectFusion::Track() { std::vector<dim3> grids; for (auto i = 0; i < m_parameters.ICPLevels; i++) { grids.push_back(divup(m_parameters.InputSize >> i, m_parameters.ImageBlock)); } // bilateral filter BilateralFilterKernel<<<grids[0], m_parameters.ImageBlock>>>( m_input_depth[0], m_raw_depth, m_gaussian, m_parameters.GaussianIlluminanceSigma, m_parameters.GaussianRadius ); // downsample std::cout << "Gausian ill = " << m_parameters.GaussianIlluminanceSigma << std::endl; std::cout << "Gausian Radius = " << m_parameters.GaussianRadius << std::endl; for (auto i = 1; i < m_parameters.ICPLevels; i++) { HalfSampleRobustKernel<<<grids[i], m_parameters.ImageBlock>>>( m_input_depth[i], m_input_depth[i-1], m_parameters.GaussianIlluminanceSigma * 3, 1 ); } for (auto itr = 0; itr < m_parameters.ICPLevels; itr++) { Depth2VertexKernel<<<grids[itr], m_parameters.ImageBlock>>>( m_input_vertex[itr], m_input_depth[itr], m_inv_cameraKs[itr] ); Vertex2NormalKernel<<<grids[itr], m_parameters.ImageBlock>>>( m_input_normal[itr], m_input_vertex[itr] ); } const Matrix4f old_pose = m_pose; const Matrix4f inv_raycast_pose = m_raycast_pose.inverse(); const Matrix4f project_ref = combine_intrinsics(m_cameraK, inv_raycast_pose); auto values = Eigen::Map<Eigen::Matrix<float, 8, 32, Eigen::RowMajor>>(m_ba_values.Data()); std::cout << "---------------------------------" << std::endl; std::cout << "ICP Track : " << std::endl; for (auto level = m_parameters.ICPLevels - 1; level >= 0; level--) { std::cout << "level " << level << "::::" << std::endl; for (auto itr = 0; itr < m_parameters.ICPIterationTimes[level]; itr++) { std::cout << "itr " << itr << ": "; TrackKernel<<<grids[level], m_parameters.ImageBlock>>>( m_reduction, m_input_vertex[level], m_input_normal[level], m_vertex, m_normal, m_pose, project_ref, 0.2f, // dist_threshold 0.8f // normal_threshold ); ReduceKernel<<<8, 112>>>( m_ba_values.GetDeviceImage().Data(), m_reduction, m_input_vertex[level].m_size ); cudaDeviceSynchronize(); 
// synchronize so the host-mapped (pinned) reduction buffer is up to date // solve linear equation Vector32f v = values.colwise().sum(); std::cout << "inlier pts = " << v(28) << std::endl; Vector6f delta_se3x = solve(Vector27f(v.segment(1, 27))); // SE3 transform std::cout << "exp pose = " << std::endl << exp(delta_se3x) << std::endl; m_pose = exp(delta_se3x) * m_pose; std::cout << "err = " << v(0) << std::endl; std::cout << "delta_se3 = " << delta_se3x << std::endl; if (delta_se3x.norm() < 1e-5) { std::cout << "Converged early, stopping iterations at this level" << std::endl; break; } } } Vector32f v = values.colwise().sum(); if ((sqrt(v(0) / v(28)) > 2e-2) || (v(28)) / (m_raw_depth.m_size.x * m_raw_depth.m_size.y) < 0.15f) { std::cout << "Don't update pose" << std::endl; m_pose = old_pose; return false; } return true; }
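IntegrateKernel above folds every new truncated-SDF sample into a per-voxel (tsdf, weight) pair with a clamped running weighted average. The snippet below is a small host-side illustration of just that update rule (the names and the main() driver are illustrative, not part of the original project); with unit sample weights the stored value converges toward the mean of the observed SDFs.

// Host-side sketch of the TSDF fusion rule applied per voxel in IntegrateKernel.
#include <algorithm>
#include <cstdio>

struct VoxelSketch { float tsdf; float weight; };

static void FuseSampleSketch(VoxelSketch& v, float sdf, float max_weight) {
  // Weighted running average, clamped to the truncation band [-1, 1].
  v.tsdf = std::min(1.0f, std::max(-1.0f, (v.weight * v.tsdf + sdf) / (v.weight + 1.0f)));
  v.weight = std::min(v.weight + 1.0f, max_weight);
}

int main() {
  VoxelSketch v{0.0f, 0.0f};
  const float samples[] = {0.9f, 0.5f, 0.4f, 0.45f};
  for (float s : samples) FuseSampleSketch(v, s, /*max_weight=*/100.0f);
  std::printf("tsdf=%f weight=%f\n", v.tsdf, v.weight);  // tsdf = 0.5625, the mean of the samples
  return 0;
}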
f31ee77551ad1874c1e93daddf52053cc6b84668.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // from the following URL // https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks /* * this program is a simple test of the binary tree recommended reduction * algorithm for CUDA * */ #include <iostream> #define TOTAL_SIZE 100000 #define nTPB 256 #define BLOCK_SIZE 64 // MUST BE A POWER OF 2!! //#define NUM_THREADS 1 // this is now "BLOCK_SIZE" #define NUM_BLOCKS 1 #define LENGTH_LOOKUP 240 __global__ void sumMaxMinKernel( float* inputAtoms, float* answers ) { // Reduction (min/max/avr/sum), valid only when blockDim.x is a power of two: int thread2; float temp; __shared__ float min[BLOCK_SIZE], max[BLOCK_SIZE], avg[BLOCK_SIZE], sum[BLOCK_SIZE]; // printf("inputting %f %d\n", inputAtoms[threadIdx.x], threadIdx.x); // import info min[threadIdx.x] = inputAtoms[threadIdx.x]; max[threadIdx.x] = inputAtoms[threadIdx.x]; avg[threadIdx.x] = inputAtoms[threadIdx.x]; sum[threadIdx.x] = inputAtoms[threadIdx.x]; __syncthreads(); int nTotalThreads = blockDim.x; // Total number of active threads while (nTotalThreads > 1) { int halfPoint = (nTotalThreads >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // printf("thread pair %d %d\n", threadIdx.x, threadIdx.x + halfPoint); thread2 = threadIdx.x + halfPoint; // Get the shared value stored by another thread // min reduction temp = min[thread2]; if (temp < min[threadIdx.x]) min[threadIdx.x] = temp; // max reduction temp = max[thread2]; if (temp > max[threadIdx.x]) max[threadIdx.x] = temp; // sum reduction // printf("sum %f %f\n", sum[threadIdx.x], sum[thread2]); sum[threadIdx.x] += sum[thread2]; // average reduction avg[threadIdx.x] += avg[thread2]; avg[threadIdx.x] *= 0.5f; } __syncthreads(); // Reducing the binary tree size by two: nTotalThreads = halfPoint; } // export results if (threadIdx.x == 0) { answers[0] = min[0]; answers[1] = max[0]; answers[2] = avg[0]; answers[3] = sum[0]; } } int main(){ //-------------------------------------------------------------------------- // practice code (array) // allocate variable on the GPU and CPU int i; float *inputAtoms, *d_inputAtoms, *answers, *d_answers; size_t sizeArray; sizeArray = sizeof(float) * BLOCK_SIZE; inputAtoms = (float*) malloc ( sizeArray ); hipMalloc( &d_inputAtoms, sizeArray ); sizeArray = sizeof(float) * 4; answers = (float*) malloc ( sizeArray ); hipMalloc( &d_answers, sizeArray ); // copy local variable to GPU for ( i = 0; i < BLOCK_SIZE; ++i ) { inputAtoms[i] = 1.0f + (float)i; printf("%f ", inputAtoms[i]); } printf("\n\n"); hipMemcpy( d_inputAtoms, inputAtoms, sizeof(float) * BLOCK_SIZE, hipMemcpyHostToDevice ); // run atomicAdd kernel hipLaunchKernelGGL(( sumMaxMinKernel), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_inputAtoms, d_answers); hipDeviceSynchronize(); // copy back result to local memory hipMemcpy( answers, d_answers, sizeof(float) * 4, hipMemcpyDeviceToHost ); // report results and close printf("min %f\n", answers[0]); printf("max %f\n", answers[1]); printf("average %f\n", answers[2]); printf("sum %f\n", answers[3]); free( inputAtoms ); hipFree( d_inputAtoms ); free( answers ); hipFree( d_answers ); return 0; }
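sumMaxMinKernel above reduces min, max, average and sum over a single 64-element block. A small CPU reference such as the following (not part of the file; the function name is illustrative) makes the expected outputs for the 1..BLOCK_SIZE test input explicit and can be used to check the four values printed by main().

// Host-side reference for the block reduction above; out4 is filled in the same
// order the kernel uses: min, max, average, sum.
#include <cfloat>

static void referenceSumMaxMin(const float* in, int n, float* out4) {
  float mn = FLT_MAX, mx = -FLT_MAX, sum = 0.0f;
  for (int i = 0; i < n; ++i) {
    mn = (in[i] < mn) ? in[i] : mn;
    mx = (in[i] > mx) ? in[i] : mx;
    sum += in[i];
  }
  out4[0] = mn; out4[1] = mx; out4[2] = sum / n; out4[3] = sum;
  // For inputAtoms[i] = 1 + i and BLOCK_SIZE = 64 this yields
  // min = 1, max = 64, average = 32.5, sum = 2080.
}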
f31ee77551ad1874c1e93daddf52053cc6b84668.cu
// from the following URL // https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks /* * this program is a simple test of the binary tree recommended reduction * algorithm for CUDA * */ #include <iostream> #define TOTAL_SIZE 100000 #define nTPB 256 #define BLOCK_SIZE 64 // MUST BE A POWER OF 2!! //#define NUM_THREADS 1 // this is now "BLOCK_SIZE" #define NUM_BLOCKS 1 #define LENGTH_LOOKUP 240 __global__ void sumMaxMinKernel( float* inputAtoms, float* answers ) { // Reduction (min/max/avr/sum), valid only when blockDim.x is a power of two: int thread2; float temp; __shared__ float min[BLOCK_SIZE], max[BLOCK_SIZE], avg[BLOCK_SIZE], sum[BLOCK_SIZE]; // printf("inputting %f %d\n", inputAtoms[threadIdx.x], threadIdx.x); // import info min[threadIdx.x] = inputAtoms[threadIdx.x]; max[threadIdx.x] = inputAtoms[threadIdx.x]; avg[threadIdx.x] = inputAtoms[threadIdx.x]; sum[threadIdx.x] = inputAtoms[threadIdx.x]; __syncthreads(); int nTotalThreads = blockDim.x; // Total number of active threads while (nTotalThreads > 1) { int halfPoint = (nTotalThreads >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // printf("thread pair %d %d\n", threadIdx.x, threadIdx.x + halfPoint); thread2 = threadIdx.x + halfPoint; // Get the shared value stored by another thread // min reduction temp = min[thread2]; if (temp < min[threadIdx.x]) min[threadIdx.x] = temp; // max reduction temp = max[thread2]; if (temp > max[threadIdx.x]) max[threadIdx.x] = temp; // sum reduction // printf("sum %f %f\n", sum[threadIdx.x], sum[thread2]); sum[threadIdx.x] += sum[thread2]; // average reduction avg[threadIdx.x] += avg[thread2]; avg[threadIdx.x] *= 0.5f; } __syncthreads(); // Reducing the binary tree size by two: nTotalThreads = halfPoint; } // export results if (threadIdx.x == 0) { answers[0] = min[0]; answers[1] = max[0]; answers[2] = avg[0]; answers[3] = sum[0]; } } int main(){ //-------------------------------------------------------------------------- // practice code (array) // allocate variable on the GPU and CPU int i; float *inputAtoms, *d_inputAtoms, *answers, *d_answers; size_t sizeArray; sizeArray = sizeof(float) * BLOCK_SIZE; inputAtoms = (float*) malloc ( sizeArray ); cudaMalloc( &d_inputAtoms, sizeArray ); sizeArray = sizeof(float) * 4; answers = (float*) malloc ( sizeArray ); cudaMalloc( &d_answers, sizeArray ); // copy local variable to GPU for ( i = 0; i < BLOCK_SIZE; ++i ) { inputAtoms[i] = 1.0f + (float)i; printf("%f ", inputAtoms[i]); } printf("\n\n"); cudaMemcpy( d_inputAtoms, inputAtoms, sizeof(float) * BLOCK_SIZE, cudaMemcpyHostToDevice ); // run atomicAdd kernel sumMaxMinKernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_inputAtoms, d_answers); cudaDeviceSynchronize(); // copy back result to local memory cudaMemcpy( answers, d_answers, sizeof(float) * 4, cudaMemcpyDeviceToHost ); // report results and close printf("min %f\n", answers[0]); printf("max %f\n", answers[1]); printf("average %f\n", answers[2]); printf("sum %f\n", answers[3]); free( inputAtoms ); cudaFree( d_inputAtoms ); free( answers ); cudaFree( d_answers ); return 0; }
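Each .hip/.cu pair in this listing is a mechanical hipify translation: the device code is unchanged and only the runtime API and the launch syntax are renamed. The sketch below (the kernel and helper are illustrative, not taken from either file) summarizes the mapping visible in the pair above.

// CUDA -> HIP correspondences used by hipify in the files above:
//   cudaMalloc / cudaFree / cudaMemcpy        ->  hipMalloc / hipFree / hipMemcpy
//   cudaMemcpyHostToDevice / ...DeviceToHost  ->  hipMemcpyHostToDevice / hipMemcpyDeviceToHost
//   cudaDeviceSynchronize                     ->  hipDeviceSynchronize
//   kernel<<<grid, block>>>(args...)          ->  hipLaunchKernelGGL(kernel, dim3(grid), dim3(block),
//                                                 0 /*sharedMemBytes*/, 0 /*stream*/, args...)
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, float s) { data[threadIdx.x] *= s; }

static void launchScaleSketch(float* d_data, int n, float s) {
  // Equivalent to scaleKernel<<<1, n>>>(d_data, s); in the CUDA original.
  hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(n), 0, 0, d_data, s);
  hipDeviceSynchronize();
}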
7a5640d0ff30c1e338ec28865bbf5e2ce92ca22e.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Convolution 2D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "conv2d_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace cutlass::library; namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kConv2d, { {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"}, {ArgumentTypeID::kInteger, 
{"pad_h"}, "Padding in H direction"}, {ArgumentTypeID::kInteger, {"pad_w"}, "Padding in W direction"}, {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, }, { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } ) { description_ = " Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)"; } /// Destructor Conv2dOperationProfiler::~Conv2dOperationProfiler() { } /// Prints usage statement for the math function void Conv2dOperationProfiler::print_usage(std::ostream &out) const { out << "Conv2d" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Conv2dOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular convolution (specify all the convolution parameters):\n" << " $ cutlass_profiler --operation=Conv2d" " --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32" " --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3" " --pad_h=1 --pad_w=1" " --stride_h=1 --stride_w=1" " --dilation_h=1 --dilation_w=1\n\n"; } #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Total number of bytes loaded int64_t Conv2dOperationProfiler::Conv2dProblem::bytes( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); // Input bytes read and Output bytes written for the gemm problem int64_t bytes_ = int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * 
mnk.m() / 8) * mnk.n(); } return bytes_; } /// Total number of flops computed int64_t Conv2dOperationProfiler::Conv2dProblem::flops( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; // Adjust mainloop flop for dgrad strided if (operation_desc.conv_kind == library::ConvKind::kDgrad) { flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w); } int64_t flops_total_ = flops_mainloop_ + flops_epilogue_; //complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_total_ *=4; break; default: break; } return flops_total_; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Conv2dOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(operation->description()); if (!arg_as_int(problem_.n, "n", problem_space, problem)) { // default value problem_.n = 1; } if (!arg_as_int(problem_.h, "h", problem_space, problem)) { // default value problem_.h = 16; } if (!arg_as_int(problem_.w, "w", problem_space, problem)) { // default value problem_.w = 16; } if (!arg_as_int(problem_.c, "c", problem_space, problem)) { // default value problem_.c = 64; } if (!arg_as_int(problem_.k, "k", problem_space, problem)) { // default value problem_.k = 64; } if (!arg_as_int(problem_.r, "r", problem_space, problem)) { // default value problem_.r = 3; } if (!arg_as_int(problem_.s, "s", problem_space, problem)) { // default value problem_.s = 3; } if (!arg_as_int(problem_.groups, "g", problem_space, problem)) { // default value problem_.groups = 1; } if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { // default value problem_.pad_h = 1; } if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { // default value problem_.pad_w = 1; } if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { // default value problem_.stride_h = 1; } if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { // default value problem_.stride_w = 1; } if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { // default value problem_.dilation_h = 1; } if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { // default value problem_.dilation_w = 1; } //////////////////////// Convolution output dimensions p and q //////////////////////// // Cutlass convolutions support arbitrary output sizes and not constrained by // // input, filter, padding, striding, dilation sizes. // // cuDNN sets the output dimensions (p, q) using following equations: // // // // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // // where; div_up(a, b) : (a - 1)/b + 1 // // // // Thus, when output p and q dimensions are unspecified by the user // // cutlass profiler sets p and q which are cuDNN compliant. 
// // // //////////////////////////////////////////////////////////////////////////////////////// // set convolution output p if (!arg_as_int(problem_.p, "p", problem_space, problem)) { // default value (set using cudnn formula for output height, when p is not provided) problem_.p = ( problem_.h + 2 * problem_.pad_h - ((problem_.r - 1) * problem_.dilation_h + 1) ) / (problem_.stride_h) + 1; } // set convolution output q if (!arg_as_int(problem_.q, "q", problem_space, problem)) { // default value (set using cudnn formula for output width, when q is not provided) problem_.q = ( problem_.w + 2 * problem_.pad_w - ((problem_.s - 1) * problem_.dilation_w + 1) ) / (problem_.stride_w) + 1; } ///////////////////////////////////////////////////////////////////////////////////////// if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { // default value problem_.split_k_mode = library::SplitKMode::kSerial; } if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { // default value problem_.split_k_slices = 1; } if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { // default value problem_.conv_mode = library::ConvModeID::kCrossCorrelation; } if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { // default value problem_.eq_gemm_provider = library::Provider::kNone; } if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( problem_.alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( problem_.beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } // initialize library::Conv2dConfiguration conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize( int(problem_.n), int(problem_.h), int(problem_.w), int(problem_.c), int(problem_.k), int(problem_.r), int(problem_.s), int(problem_.p), int(problem_.q), int(problem_.pad_h), int(problem_.pad_w), int(problem_.stride_h), int(problem_.stride_w), int(problem_.dilation_h), int(problem_.dilation_w), static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)), int(problem_.split_k_slices), int(problem_.groups) ); conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode)); conv_workspace_.set_stride_vector( problem_, operation_desc.conv_kind, operation_desc.A.layout, operation_desc.B.layout, operation_desc.C.layout); // initialize library::ConvArguments conv_workspace_.arguments.A = nullptr; conv_workspace_.arguments.B = nullptr; conv_workspace_.arguments.C = nullptr; conv_workspace_.arguments.D = nullptr; 
conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // initialize reduction operation for parallel splitKMode if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { return Status::kErrorInternal; } } initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); } /// Initializes the performance result void Conv2dOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; result.arguments.resize(problem_space.rank()); set_argument(result, "Activation", problem_space, std::string(library::to_string(operation_desc.activation().element)) + ":" + library::to_string(operation_desc.activation().layout)); set_argument(result, "Filter", problem_space, std::string(library::to_string(operation_desc.filter().element)) + ":" + library::to_string(operation_desc.filter().layout)); set_argument(result, "Output", problem_space, std::string(library::to_string(operation_desc.output().element)) + ":" + library::to_string(operation_desc.output().layout)); set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm))); set_argument(result, "n", problem_space, problem_.n); set_argument(result, "h", problem_space, problem_.h); set_argument(result, "w", problem_space, problem_.w); set_argument(result, "c", problem_space, problem_.c); set_argument(result, "k", problem_space, problem_.k); set_argument(result, "r", problem_space, problem_.r); set_argument(result, "s", problem_space, problem_.s); set_argument(result, "p", problem_space, problem_.p); set_argument(result, "q", problem_space, problem_.q); set_argument(result, "g", problem_space, problem_.groups); set_argument(result, "pad_h", problem_space, problem_.pad_h); set_argument(result, "pad_w", problem_space, problem_.pad_w); set_argument(result, "stride_h", problem_space, problem_.stride_h); set_argument(result, "stride_w", problem_space, problem_.stride_w); set_argument(result, "dilation_h", problem_space, problem_.dilation_h); set_argument(result, "dilation_w", problem_space, problem_.dilation_w); set_argument(result, "split_k_mode", problem_space, std::string(library::to_string(problem_.split_k_mode))); set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); set_argument(result, "conv_mode", problem_space, std::string(library::to_string(problem_.conv_mode))); set_argument(result, "alpha", problem_space, library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); set_argument(result, "eq_gemm_provider", problem_space, std::string(library::to_string(problem_.eq_gemm_provider))); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Bytes of activation, filter, and output 
tensors int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) * conv_workspace_.configuration.problem_size.activation_size(); int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) * conv_workspace_.configuration.problem_size.filter_size(); int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) * conv_workspace_.configuration.problem_size.output_size(); // Bytes of activation, filter, and output tensors result.bytes = problem_.bytes(operation_desc); // Theoretical flops required for the computation result.flops = problem_.flops(operation_desc); // Measured runtime result.runtime = 0; } /// Initialize reduction problem dimensions and library::Operation bool Conv2dOperationProfiler::initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); library::ConvKind const &conv_kind = conv_desc.conv_kind; if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { return false; } if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { return false; } /// This chooses the appropriate stride element of the row-major C tensor. int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 2 : 0); /// initialize library::ReductionConfiguration conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; // find reduction operation library::ReductionFunctionalKey reduction_key( library::Provider::kCUTLASS, conv_desc.tile_description.math_instruction.element_accumulator, // element workspace conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator conv_desc.C.element, // element output conv_desc.element_epilogue // element compute ); #if 0// debug print to check which reduction instance is selected std::cout << reduction_key << "\n"; #endif auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { return false; } // initialize reduction operation required for parallel split-k conv2d operator reduction_op_ = reduction_it->second; // reduction operation found and initialized return true; } /// Initializes workspace Status Conv2dOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if 
(!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(underlying_operation->description()); // Compute the number of copies of the problem to avoid L2 camping. if (!options.profiling.workspace_count) { int64_t bytes = problem_.bytes(operation_desc); if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { conv_workspace_.problem_count = 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); } else { conv_workspace_.problem_count = 1; } } else { conv_workspace_.problem_count = options.profiling.workspace_count; } if (options.execution_mode != ExecutionMode::kDryRun) { conv_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, problem_.extent_a(operation_desc.conv_kind), conv_workspace_.configuration.stride_a, conv_workspace_.problem_count ); conv_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count ); if(problem_.groups == problem_.c && problem_.groups == problem_.k){ // Depthwise direct conv kernel needs reorder the filter. conv_workspace_.reordered_B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count ); } conv_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); conv_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = underlying_operation->initialize( &conv_workspace_.configuration, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (status != Status::kSuccess) { return status; } if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); status = reduction_op_->initialize( &conv_workspace_.reduction_configuration, conv_workspace_.reduction_host_workspace.data(), nullptr); if (status != Status::kSuccess) { return status; } } } // // If CUTLASS is enabled, 
generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kConv2d; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Conv2dOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } hipError_t result; // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.reordered_B != nullptr){ conv_workspace_.arguments.reordered_B = conv_workspace_.reordered_B->data(); }else{ conv_workspace_.arguments.reordered_B = nullptr; } conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } // // Run the CUTLASS operation // // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { results_.back().disposition = Disposition::kFailed; return false; } } #if 0 std::cout << "profiling : " << std::endl << "conv2d : " << operation->description().name << std::endl << "underlying conv2d : " << underlying_operation->description().name << std::endl << "reduction : " << reduction_op_->description().name << std::endl; #endif // run cutlass conv2d operation results_.back().status = underlying_operation->run( &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // Run parallel reduction kernel for parallel split_k_mode if 
(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { results_.back().status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } } // Synchronize before running device reference result = hipDeviceSynchronize(); if (result != hipSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUDNN // Run verification cudnn reference if (options.verification.provider_enabled(library::Provider::kCUDNN)) { // Guard against unsupported cases auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description()); Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); // Initialize reference data to the source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); if (status == Status::kSuccess) { // call cudnn verification if supported verify_with_cudnn_( options, report, device_context, operation, problem_space, problem); } else if (status == Status::kErrorInvalidProblem) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; } else { // set verification map for cudnn to not supported results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUDNN // Run verification device reference if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_device_reference_( options, report, device_context, operation, problem_space, problem); } // Run verification host reference if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_host_reference_( options, report, device_context, operation, problem_space, problem); } // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find host reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( 
library::Provider::kReferenceHost, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); #if 0 // debug print to check which host reference instance is selected std::cout << conv2d_key << "\n"; #endif auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // host reference has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Copy input tensors A, B, and C from device to host buffers // conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); // // Initialize structure containing Conv2d arguments // conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Initialize host reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // // Run host reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; return true; } // // Copy host reference output to device memory for equality check on device // conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); // // Verify results // results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceHost); } // Return true 
means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find device reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( library::Provider::kReferenceDevice, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // conv2d device reference minimum cc is 50 and no iterator algorithm library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // device reference has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Initialize device reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run device reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified; return true; } // // Verify results // results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceDevice); } // Return true means continue profiling return true; } /// Measures performance results bool Conv2dOperationProfiler::profile( Options const &options, PerformanceReport 
&report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data() ); } return true; } /// Method to profile a CUTLASS Operation Status Conv2dOperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments); if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { // Setup rotating workspace int workspace_idx = options.profiling.warmup_iterations + iteration; int problem_idx = (workspace_idx % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); 
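// ---------------------------------------------------------------------------
// Illustrative standalone sketch (not library code) of the rotating-workspace
// scheme used by the warmup and profiling loops above: enough copies of the
// problem are kept to cover roughly 3x the L2 cache, and each iteration indexes
// a different copy so timed kernels cannot be served from L2-resident data.
// Function and parameter names below are assumptions made for the sketch.
#include <cstdint>
namespace sketch {
inline int compute_problem_count(int64_t problem_bytes, int64_t l2_cache_bytes) {
  // Mirrors the heuristic in initialize_workspace(): replicate the problem
  // until roughly 3x the L2 cache is covered, otherwise keep a single copy.
  if (problem_bytes < 3 * l2_cache_bytes) {
    return 1 + int((3 * l2_cache_bytes) / problem_bytes);
  }
  return 1;
}
inline int rotating_index(int iteration, int warmup_iterations, int problem_count) {
  // Index used by the warmup loop above; the timed loop simply uses
  // iteration % problem_count.
  return (warmup_iterations + iteration) % problem_count;
}
} // namespace sketch
// ---------------------------------------------------------------------------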
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { // Setup rotating workspace int problem_idx = (iteration % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool Conv2dOperationProfiler::verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); // // Construct cudnn operators // CudnnCreate handle; cudnnStatus_t status = handle.get_cudnn_create_status(); if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Initialize state // // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // cuDNN does not support four tensor arguments, so we copy the tensor C data into // tensor D. 
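// ---------------------------------------------------------------------------
// Hedged sketch (shown standalone) of the event-based timing pattern that
// profile_cutlass_() relies on: record a start event, enqueue N launches,
// record a stop event, then divide the elapsed time by N for an average
// per-iteration runtime. The real GpuTimer class may differ in detail; the
// helper name and the function-pointer parameter are assumptions.
#include <hip/hip_runtime.h>
inline float average_runtime_ms(void (*enqueue_operation)(), int iterations) {
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);
  for (int i = 0; i < iterations; ++i) {
    enqueue_operation();            // enqueue the operation being profiled
  }
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);        // wait for all enqueued work to complete
  float total_ms = 0.0f;
  hipEventElapsedTime(&total_ms, start, stop);
  hipEventDestroy(start);
  hipEventDestroy(stop);
  return total_ms / float(iterations > 0 ? iterations : 1);
}
// ---------------------------------------------------------------------------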
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); conv_workspace_.arguments.C = conv_workspace_.arguments.D; try { // // Construct dispatcher to cudnn operator // detail::cudnnConvDispatcher conv_op( conv_desc, conv_workspace_.configuration, conv_workspace_.arguments, handle ); if (conv_op.status != Status::kSuccess) { if (conv_op.status == Status::kErrorNotSupported) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } else { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } return true; } status = conv_op(handle); // Handle errors if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { save_workspace( device_context, options, conv_desc, library::Provider::kCUTLASS, library::Provider::kCUDNN); } } catch (...) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } // Return true means continue profiling return true; } #endif // #if CUTLASS_ENABLE_CUDNN ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
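// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not library code) of the parallel split-k
// flow configured by the profiler above: the conv kernel writes `partitions`
// partial accumulator tiles into a device workspace using alpha=1 / beta=0,
// and a separate reduction pass sums the partitions and applies the user's
// alpha/beta to produce D. The float element type, the names, and the
// contiguous partition layout (partition_stride == mn) are assumptions.
#include <cstdint>
#include <vector>
inline void reduce_split_k_host(std::vector<float> const &workspace,  // partitions * mn elements
                                std::vector<float> const &C,
                                std::vector<float> &D,
                                int64_t mn,
                                int partitions,
                                float alpha,
                                float beta) {
  for (int64_t i = 0; i < mn; ++i) {
    float partial = 0.0f;
    for (int p = 0; p < partitions; ++p) {
      partial += workspace[p * mn + i];   // partition_stride == mn, as set in
                                          // initialize_reduction_configuration_()
    }
    D[i] = alpha * partial + beta * C[i];
  }
}
// ---------------------------------------------------------------------------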
7a5640d0ff30c1e338ec28865bbf5e2ce92ca22e.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Convolution 2D profiling */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "conv2d_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// using namespace cutlass::library; namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor Conv2dOperationProfiler::Conv2dOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kConv2d, { {ArgumentTypeID::kEnumerated, {"conv_kind"}, "Convolutional operator (fprop, dgrad, wgrad)"}, {ArgumentTypeID::kInteger, {"n", "input_n"}, "Input N dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"h", "input_h"}, "Input H dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"w", "input_w"}, "Input W dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"c", "input_c"}, "Input C dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"k", "filter_k"}, "Filter K dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"r", "filter_r"}, "Filter R dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"s", "filter_s"}, "Filter S dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"p", "output_p"}, "Output P dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"q", "output_q"}, "Output Q dimension of the Conv2d problem space"}, {ArgumentTypeID::kInteger, {"g", "groups"}, "Number of convolution groups"}, {ArgumentTypeID::kInteger, {"pad_h"}, "Padding in H direction"}, {ArgumentTypeID::kInteger, 
{"pad_w"}, "Padding in W direction"}, {ArgumentTypeID::kInteger, {"stride_h"}, "Stride in H direction"}, {ArgumentTypeID::kInteger, {"stride_w"}, "Stride in W direction"}, {ArgumentTypeID::kInteger, {"dilation_h"}, "Dilation in H direction"}, {ArgumentTypeID::kInteger, {"dilation_w"}, "Dilation in W direction"}, {ArgumentTypeID::kTensor, {"Activation"}, "Tensor storing the Activation operand"}, {ArgumentTypeID::kTensor, {"Filter"}, "Tensor storing the Filter operand"}, {ArgumentTypeID::kTensor, {"Output"}, "Tensor storing the Output operand"}, {ArgumentTypeID::kEnumerated, {"conv_mode"}, "Convolution filter mode (conv, cross)"}, {ArgumentTypeID::kEnumerated, {"iterator_algorithm", "iterator_algo"}, "Convolution iterator algorithm (analytic, optimized)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kEnumerated, {"split_k_mode", "split-k-mode"}, "SplitK mode for serial or parallel reduction (serial, parallel)"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kEnumerated, {"eq_gemm_provider", "eq-gemm-provider"}, "Enable profiling equivalent gemm by the following providers (cutlass)"}, }, { library::Provider::kReferenceDevice, library::Provider::kReferenceHost, library::Provider::kCUDNN } ) { description_ = " Conv2d operation. Output(Tensor4D) = alpha * Input(Tensor4D) * Filter(Tensor4D) + beta * Input(Tensor4D)"; } /// Destructor Conv2dOperationProfiler::~Conv2dOperationProfiler() { } /// Prints usage statement for the math function void Conv2dOperationProfiler::print_usage(std::ostream &out) const { out << "Conv2d" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void Conv2dOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular convolution (specify all the convolution parameters):\n" << " $ cutlass_profiler --operation=Conv2d" " --Activation=f16:nhwc --Filter=f16:nhwc --Output=f16 --accumulator-type=f32" " --n=32 --h=14 --w=14 --c=8 --k=64 --r=3 --s=3" " --pad_h=1 --pad_w=1" " --stride_h=1 --stride_w=1" " --dilation_h=1 --dilation_w=1\n\n"; } #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif ///////////////////////////////////////////////////////////////////////////////////////////////// /// Total number of bytes loaded int64_t Conv2dOperationProfiler::Conv2dProblem::bytes( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); // Input bytes read and Output bytes written for the gemm problem int64_t bytes_ = int64_t(library::sizeof_bits(operation_desc.A.element) * mnk.m() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.B.element) * mnk.n() / 8) * mnk.k() + int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes_ += int64_t(library::sizeof_bits(operation_desc.C.element) * mnk.m() / 8) * mnk.n(); } return bytes_; } /// Total number of 
flops computed int64_t Conv2dOperationProfiler::Conv2dProblem::flops( library::ConvDescription const &operation_desc) const { cutlass::gemm::GemmCoord mnk = eq_gemm_size(operation_desc.conv_kind); int64_t flops_mainloop_ = int64_t(mnk.m()) * mnk.n() * mnk.k() * 2; int64_t flops_epilogue_ = int64_t(mnk.m()) * int64_t(mnk.n()) * 2; // Adjust mainloop flop for dgrad strided if (operation_desc.conv_kind == library::ConvKind::kDgrad) { flops_mainloop_ = flops_mainloop_ / (stride_h * stride_w); } int64_t flops_total_ = flops_mainloop_ + flops_epilogue_; //complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_total_ *=4; break; default: break; } return flops_total_; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Extracts the problem dimensions Status Conv2dOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(operation->description()); if (!arg_as_int(problem_.n, "n", problem_space, problem)) { // default value problem_.n = 1; } if (!arg_as_int(problem_.h, "h", problem_space, problem)) { // default value problem_.h = 16; } if (!arg_as_int(problem_.w, "w", problem_space, problem)) { // default value problem_.w = 16; } if (!arg_as_int(problem_.c, "c", problem_space, problem)) { // default value problem_.c = 64; } if (!arg_as_int(problem_.k, "k", problem_space, problem)) { // default value problem_.k = 64; } if (!arg_as_int(problem_.r, "r", problem_space, problem)) { // default value problem_.r = 3; } if (!arg_as_int(problem_.s, "s", problem_space, problem)) { // default value problem_.s = 3; } if (!arg_as_int(problem_.groups, "g", problem_space, problem)) { // default value problem_.groups = 1; } if (!arg_as_int(problem_.pad_h, "pad_h", problem_space, problem)) { // default value problem_.pad_h = 1; } if (!arg_as_int(problem_.pad_w, "pad_w", problem_space, problem)) { // default value problem_.pad_w = 1; } if (!arg_as_int(problem_.stride_h, "stride_h", problem_space, problem)) { // default value problem_.stride_h = 1; } if (!arg_as_int(problem_.stride_w, "stride_w", problem_space, problem)) { // default value problem_.stride_w = 1; } if (!arg_as_int(problem_.dilation_h, "dilation_h", problem_space, problem)) { // default value problem_.dilation_h = 1; } if (!arg_as_int(problem_.dilation_w, "dilation_w", problem_space, problem)) { // default value problem_.dilation_w = 1; } //////////////////////// Convolution output dimensions p and q //////////////////////// // Cutlass convolutions support arbitrary output sizes and not constrained by // // input, filter, padding, striding, dilation sizes. // // cuDNN sets the output dimensions (p, q) using following equations: // // // // output = div_up(input + 2 * pad - ((filter - 1) * dilation + 1) + 1, stride) // // where; div_up(a, b) : (a - 1)/b + 1 // // // // Thus, when output p and q dimensions are unspecified by the user // // cutlass profiler sets p and q which are cuDNN compliant. 
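// Worked example of the above (values are illustrative only): with the       //
// defaults shown in print_examples() (h=14, pad_h=1, r=3, dilation_h=1,      //
// stride_h=1), the formula gives                                             //
//   p = div_up(14 + 2*1 - ((3-1)*1 + 1) + 1, 1) = div_up(14, 1) = 14;        //
// q is computed the same way from w, pad_w, s, dilation_w, and stride_w.     //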
// // // //////////////////////////////////////////////////////////////////////////////////////// // set convolution output p if (!arg_as_int(problem_.p, "p", problem_space, problem)) { // default value (set using cudnn formula for output height, when p is not provided) problem_.p = ( problem_.h + 2 * problem_.pad_h - ((problem_.r - 1) * problem_.dilation_h + 1) ) / (problem_.stride_h) + 1; } // set convolution output q if (!arg_as_int(problem_.q, "q", problem_space, problem)) { // default value (set using cudnn formula for output width, when q is not provided) problem_.q = ( problem_.w + 2 * problem_.pad_w - ((problem_.s - 1) * problem_.dilation_w + 1) ) / (problem_.stride_w) + 1; } ///////////////////////////////////////////////////////////////////////////////////////// if (!arg_as_SplitKModeID(problem_.split_k_mode, "split_k_mode", problem_space, problem)) { // default value problem_.split_k_mode = library::SplitKMode::kSerial; } if (!arg_as_int(problem_.split_k_slices, "split_k_slices", problem_space, problem)) { // default value problem_.split_k_slices = 1; } if (!arg_as_ConvModeID(problem_.conv_mode, "conv_mode", problem_space, problem)) { // default value problem_.conv_mode = library::ConvModeID::kCrossCorrelation; } if (!arg_as_ProviderID(problem_.eq_gemm_provider, "eq_gemm_provider", problem_space, problem)) { // default value problem_.eq_gemm_provider = library::Provider::kNone; } if (!conv_kind_satisfies(operation_desc.conv_kind, "conv_kind", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!iterator_algorithm_satisfies(operation_desc.iterator_algorithm, "iterator_algorithm", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.activation(), "Activation", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.filter(), "Filter", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.output(), "Output", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( problem_.alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(problem_.alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( problem_.beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(problem_.beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } // initialize library::Conv2dConfiguration conv_workspace_.configuration.problem_size = conv::Conv2dProblemSize( int(problem_.n), int(problem_.h), int(problem_.w), int(problem_.c), int(problem_.k), int(problem_.r), int(problem_.s), int(problem_.p), int(problem_.q), int(problem_.pad_h), int(problem_.pad_w), int(problem_.stride_h), int(problem_.stride_w), int(problem_.dilation_h), int(problem_.dilation_w), static_cast<conv::Mode>(static_cast<int>(problem_.conv_mode)), int(problem_.split_k_slices), int(problem_.groups) ); conv_workspace_.configuration.split_k_mode = static_cast<conv::SplitKMode>(static_cast<int>(problem_.split_k_mode)); conv_workspace_.set_stride_vector( problem_, operation_desc.conv_kind, operation_desc.A.layout, operation_desc.B.layout, operation_desc.C.layout); // initialize library::ConvArguments conv_workspace_.arguments.A = nullptr; conv_workspace_.arguments.B = nullptr; conv_workspace_.arguments.C = nullptr; conv_workspace_.arguments.D = nullptr; 
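// ---------------------------------------------------------------------------
// Hedged sketch of the "equivalent GEMM" view that Conv2dProblem::bytes() and
// Conv2dProblem::flops() above are built on, written out for the fprop case
// only (dgrad and wgrad permute the tensor roles). The struct and function
// names here are illustrative, not library API.
#include <cstdint>
struct EqGemmFpropSize {
  int64_t m, n, k;
};
inline EqGemmFpropSize eq_gemm_fprop(int N, int P, int Q, int K, int R, int S, int C) {
  // Implicit GEMM for fprop: (N*P*Q x C*R*S) * (C*R*S x K) -> (N*P*Q x K)
  return { int64_t(N) * P * Q, int64_t(K), int64_t(C) * R * S };
}
inline int64_t eq_gemm_flops(EqGemmFpropSize g) {
  // 2 flops (multiply + add) per mainloop MAC, plus 2 per output element in
  // the epilogue, matching the flops() model above.
  return 2 * g.m * g.n * g.k + 2 * g.m * g.n;
}
// ---------------------------------------------------------------------------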
conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // initialize reduction operation for parallel splitKMode if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if(!initialize_reduction_configuration_(options, report, device_context, operation, problem_space, problem)) { return Status::kErrorInternal; } } initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&conv_workspace_.configuration, &conv_workspace_.arguments); } /// Initializes the performance result void Conv2dOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::ConvDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = Status::kSuccess; result.operation_name = operation_desc.name; result.arguments.resize(problem_space.rank()); set_argument(result, "Activation", problem_space, std::string(library::to_string(operation_desc.activation().element)) + ":" + library::to_string(operation_desc.activation().layout)); set_argument(result, "Filter", problem_space, std::string(library::to_string(operation_desc.filter().element)) + ":" + library::to_string(operation_desc.filter().layout)); set_argument(result, "Output", problem_space, std::string(library::to_string(operation_desc.output().element)) + ":" + library::to_string(operation_desc.output().layout)); set_argument(result, "conv_kind", problem_space, library::to_string(operation_desc.conv_kind)); set_argument(result, "iterator_algorithm", problem_space, std::string(library::to_string(operation_desc.iterator_algorithm))); set_argument(result, "n", problem_space, problem_.n); set_argument(result, "h", problem_space, problem_.h); set_argument(result, "w", problem_space, problem_.w); set_argument(result, "c", problem_space, problem_.c); set_argument(result, "k", problem_space, problem_.k); set_argument(result, "r", problem_space, problem_.r); set_argument(result, "s", problem_space, problem_.s); set_argument(result, "p", problem_space, problem_.p); set_argument(result, "q", problem_space, problem_.q); set_argument(result, "g", problem_space, problem_.groups); set_argument(result, "pad_h", problem_space, problem_.pad_h); set_argument(result, "pad_w", problem_space, problem_.pad_w); set_argument(result, "stride_h", problem_space, problem_.stride_h); set_argument(result, "stride_w", problem_space, problem_.stride_w); set_argument(result, "dilation_h", problem_space, problem_.dilation_h); set_argument(result, "dilation_w", problem_space, problem_.dilation_w); set_argument(result, "split_k_mode", problem_space, std::string(library::to_string(problem_.split_k_mode))); set_argument(result, "split_k_slices", problem_space, problem_.split_k_slices); set_argument(result, "conv_mode", problem_space, std::string(library::to_string(problem_.conv_mode))); set_argument(result, "alpha", problem_space, library::lexical_cast(problem_.alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(problem_.beta, operation_desc.element_epilogue)); set_argument(result, "eq_gemm_provider", problem_space, std::string(library::to_string(problem_.eq_gemm_provider))); OperationProfiler::initialize_result_(result, operation_desc, problem_space); // Bytes of activation, filter, and output 
tensors int64_t activation_bytes = int64_t(library::sizeof_bits(operation_desc.activation().element) / 8) * conv_workspace_.configuration.problem_size.activation_size(); int64_t filter_bytes = int64_t(library::sizeof_bits(operation_desc.filter().element) / 8) * conv_workspace_.configuration.problem_size.filter_size(); int64_t output_bytes = int64_t(library::sizeof_bits(operation_desc.output().element) / 8) * conv_workspace_.configuration.problem_size.output_size(); // Bytes of activation, filter, and output tensors result.bytes = problem_.bytes(operation_desc); // Theoretical flops required for the computation result.flops = problem_.flops(operation_desc); // Measured runtime result.runtime = 0; } /// Initialize reduction problem dimensions and library::Operation bool Conv2dOperationProfiler::initialize_reduction_configuration_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::ConvDescription const &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); library::ConvKind const &conv_kind = conv_desc.conv_kind; if (!cast_from_double(problem_.alpha_one, conv_desc.element_epilogue, 1)) { return false; } if (!cast_from_double(problem_.beta_zero, conv_desc.element_epilogue, 0)) { return false; } /// This chooses the appropriate stride element of the row-major C tensor. int const & tensor_c_stride_idx = (conv_kind == library::ConvKind::kWgrad ? 2 : 0); /// initialize library::ReductionConfiguration conv_workspace_.reduction_configuration.problem_size = problem_.eq_gemm_size(conv_kind).mn(); conv_workspace_.reduction_configuration.partitions = int(problem_.split_k_slices); conv_workspace_.reduction_configuration.partition_stride = problem_.eq_gemm_size(conv_kind).mn().product(); conv_workspace_.reduction_configuration.ldw = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.lds = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; conv_workspace_.reduction_configuration.ldd = conv_workspace_.configuration.stride_c[tensor_c_stride_idx]; // find reduction operation library::ReductionFunctionalKey reduction_key( library::Provider::kCUTLASS, conv_desc.tile_description.math_instruction.element_accumulator, // element workspace conv_desc.tile_description.math_instruction.element_accumulator, // element accumulator conv_desc.C.element, // element output conv_desc.element_epilogue // element compute ); #if 0// debug print to check which reduction instance is selected std::cout << reduction_key << "\n"; #endif auto reduction_it = Singleton::get().operation_table.reduction_operations.find(reduction_key); if(reduction_it == Singleton::get().operation_table.reduction_operations.end()) { return false; } // initialize reduction operation required for parallel split-k conv2d operator reduction_op_ = reduction_it->second; // reduction operation found and initialized return true; } /// Initializes workspace Status Conv2dOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if 
(!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } library::ConvDescription const &operation_desc = static_cast<library::ConvDescription const &>(underlying_operation->description()); // Compute the number of copies of the problem to avoid L2 camping. if (!options.profiling.workspace_count) { int64_t bytes = problem_.bytes(operation_desc); if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) { conv_workspace_.problem_count = 1 + int((3 * int64_t(options.device.properties.l2CacheSize)) / bytes); } else { conv_workspace_.problem_count = 1; } } else { conv_workspace_.problem_count = options.profiling.workspace_count; } if (options.execution_mode != ExecutionMode::kDryRun) { conv_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, problem_.extent_a(operation_desc.conv_kind), conv_workspace_.configuration.stride_a, conv_workspace_.problem_count ); conv_workspace_.B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count ); if(problem_.groups == problem_.c && problem_.groups == problem_.k){ // Depthwise direct conv kernel needs reorder the filter. conv_workspace_.reordered_B = device_context.allocate_tensor( options, "B", operation_desc.B.element, operation_desc.B.layout, problem_.extent_b(operation_desc.conv_kind), conv_workspace_.configuration.stride_b, conv_workspace_.problem_count ); } conv_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); conv_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, problem_.extent_c(operation_desc.conv_kind), conv_workspace_.configuration.stride_c, conv_workspace_.problem_count ); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = underlying_operation->get_host_workspace_size(&conv_workspace_.configuration); conv_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = underlying_operation->get_device_workspace_size(&conv_workspace_.configuration); conv_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = underlying_operation->initialize( &conv_workspace_.configuration, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (status != Status::kSuccess) { return status; } if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { workspace_size = reduction_op_->get_host_workspace_size(&conv_workspace_.reduction_configuration); conv_workspace_.reduction_host_workspace.resize(workspace_size, 0); status = reduction_op_->initialize( &conv_workspace_.reduction_configuration, conv_workspace_.reduction_host_workspace.data(), nullptr); if (status != Status::kSuccess) { return status; } } } // // If CUTLASS is enabled, 
generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kConv2d; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool Conv2dOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } cudaError_t result; // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.reordered_B != nullptr){ conv_workspace_.arguments.reordered_B = conv_workspace_.reordered_B->data(); }else{ conv_workspace_.arguments.reordered_B = nullptr; } conv_workspace_.Computed->copy_from_device(conv_workspace_.C->data()); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } // // Run the CUTLASS operation // // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { results_.back().disposition = Disposition::kFailed; return false; } } #if 0 std::cout << "profiling : " << std::endl << "conv2d : " << operation->description().name << std::endl << "underlying conv2d : " << underlying_operation->description().name << std::endl << "reduction : " << reduction_op_->description().name << std::endl; #endif // run cutlass conv2d operation results_.back().status = underlying_operation->run( &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // Run parallel reduction kernel for parallel split_k_mode if 
(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { results_.back().status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } } // Synchronize before running device reference result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUDNN // Run verification cudnn reference if (options.verification.provider_enabled(library::Provider::kCUDNN)) { // Guard against unsupported cases auto const & conv_desc = static_cast<library::ConvDescription const &>(operation->description()); Status status = cudnn_satisfies(conv_desc, conv_workspace_.configuration); // Initialize reference data to the source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); if (status == Status::kSuccess) { // call cudnn verification if supported verify_with_cudnn_( options, report, device_context, operation, problem_space, problem); } else if (status == Status::kErrorInvalidProblem) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kInvalidProblem; } else { // set verification map for cudnn to not supported results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUDNN // Run verification device reference if (options.verification.provider_enabled(library::Provider::kReferenceDevice)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_device_reference_( options, report, device_context, operation, problem_space, problem); } // Run verification host reference if (options.verification.provider_enabled(library::Provider::kReferenceHost)) { // Restore reference data back to initial source data conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); verify_with_host_reference_( options, report, device_context, operation, problem_space, problem); } // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_host_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find host reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( 
library::Provider::kReferenceHost, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); #if 0 // debug print to check which host reference instance is selected std::cout << conv2d_key << "\n"; #endif auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // conv2d host reference minimum cc is 0 (CPU) and no iterator algorithm library::ConvPreferenceKey preference_key(0, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotRun; return true; } // host reference has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Copy input tensors A, B, and C from device to host buffers // conv_workspace_.host_tensor_a.resize(conv_workspace_.A->bytes()); conv_workspace_.host_tensor_b.resize(conv_workspace_.B->bytes()); conv_workspace_.host_tensor_c.resize(conv_workspace_.C->bytes()); conv_workspace_.A->copy_to_host(conv_workspace_.host_tensor_a.data()); conv_workspace_.B->copy_to_host(conv_workspace_.host_tensor_b.data()); conv_workspace_.C->copy_to_host(conv_workspace_.host_tensor_c.data()); // // Initialize structure containing Conv2d arguments // conv_workspace_.arguments.A = conv_workspace_.host_tensor_a.data(); conv_workspace_.arguments.B = conv_workspace_.host_tensor_b.data(); conv_workspace_.arguments.C = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.D = conv_workspace_.host_tensor_c.data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Initialize host reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // // Run host reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceHost] = Disposition::kNotVerified; return true; } // // Copy host reference output to device memory for equality check on device // conv_workspace_.Reference->copy_from_host(conv_workspace_.arguments.D); // // Verify results // results_.back().verification_map[library::Provider::kReferenceHost] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceHost] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceHost); } // Return true 
means continue profiling return true; } /// Verifies CUTLASS against host reference bool Conv2dOperationProfiler::verify_with_device_reference_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { Status status; // // Find device reference operation using conv2d functional description key // library::OperationDescription const &desc = operation->description(); auto &conv_desc = static_cast<library::ConvDescription const &>(desc); library::ConvFunctionalKey conv2d_key( library::Provider::kReferenceDevice, conv_desc.conv_kind, conv_desc.A.element, conv_desc.A.layout, conv_desc.B.element, conv_desc.B.layout, conv_desc.C.element, conv_desc.C.layout, conv_desc.tile_description.math_instruction.element_accumulator, conv_desc.element_epilogue); auto operators_it = Singleton::get().operation_table.conv2d_operations.find(conv2d_key); if(operators_it == Singleton::get().operation_table.conv2d_operations.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // conv2d device reference minimum cc is 50 and no iterator algorithm library::ConvPreferenceKey preference_key(50, library::IteratorAlgorithmID::kNone); auto cc_it = operators_it->second.find(preference_key); if(cc_it == operators_it->second.end()) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotRun; return true; } // device reference has only one instances in Conv2dOperationVectorMap library::Operation const *reference_op = cc_it->second[0]; // // Initialize device reference operation // std::vector<uint8_t> host_workspace_reference_op; uint64_t workspace_size = reference_op->get_host_workspace_size(&conv_workspace_.configuration); host_workspace_reference_op.resize(workspace_size, 0); reference_op->initialize( &conv_workspace_.configuration, host_workspace_reference_op.data()); // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run device reference operation // status = reference_op->run( &conv_workspace_.arguments, host_workspace_reference_op.data()); // Handle errors if (status != Status::kSuccess) { results_.back().verification_map[library::Provider::kReferenceDevice] = Disposition::kNotVerified; return true; } // // Verify results // results_.back().verification_map[library::Provider::kReferenceDevice] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kReferenceDevice] == Disposition::kIncorrect) { save_workspace( device_context, options, static_cast<library::ConvDescription const &>(operation->description()), library::Provider::kCUTLASS, library::Provider::kReferenceDevice); } // Return true means continue profiling return true; } /// Measures performance results bool Conv2dOperationProfiler::profile( Options const &options, PerformanceReport 
&report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.C = conv_workspace_.C->data(); conv_workspace_.arguments.D = conv_workspace_.Computed->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_workspace_.arguments.D = conv_workspace_.device_workspace.data(); conv_workspace_.arguments.alpha = problem_.alpha_one.data(); conv_workspace_.arguments.beta = problem_.beta_zero.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->data(); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->data(); conv_workspace_.reduction_arguments.alpha = problem_.alpha.data(); conv_workspace_.reduction_arguments.beta = problem_.beta.data(); conv_workspace_.reduction_arguments.pointer_mode = library::ScalarPointerMode::kHost; } results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &conv_workspace_.arguments, conv_workspace_.host_workspace.data(), conv_workspace_.device_workspace.data() ); } return true; } /// Method to profile a CUTLASS Operation Status Conv2dOperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // initialize conv2d underlying operation to handle parallel reduction library::Operation const* underlying_operation = operation; library::ConvArguments *conv_arguments = static_cast<library::ConvArguments *>(arguments); if(conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { if (!(underlying_operation = library::find_conv_operation_for_parallel_reduction(operation))) { return Status::kErrorNotSupported; } } // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { // Setup rotating workspace int workspace_idx = options.profiling.warmup_iterations + iteration; int problem_idx = (workspace_idx % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); 
conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { // Setup rotating workspace int problem_idx = (iteration % conv_workspace_.problem_count); conv_arguments->A = conv_workspace_.A->batch_data(problem_idx); conv_arguments->B = conv_workspace_.B->batch_data(problem_idx); conv_arguments->C = conv_workspace_.C->batch_data(problem_idx); conv_arguments->D = conv_workspace_.Computed->batch_data(problem_idx); if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { // update library::ConvArguments for parallel split-k reduction conv_arguments->D = conv_workspace_.device_workspace.data(); /// initialize library::ReductionArguments conv_workspace_.reduction_arguments.workspace = conv_workspace_.device_workspace.data(); conv_workspace_.reduction_arguments.source = conv_workspace_.C->batch_data(problem_idx); conv_workspace_.reduction_arguments.destination = conv_workspace_.Computed->batch_data(problem_idx); } // Run underlying conv2d operation status = underlying_operation->run( arguments, host_workspace, device_workspace); // Run parallel reduction kernel for parallel split_k_mode if (conv_workspace_.configuration.split_k_mode == conv::SplitKMode::kParallel) { status = reduction_op_->run( &conv_workspace_.reduction_arguments, conv_workspace_.reduction_host_workspace.data(), nullptr); } if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if CUTLASS_ENABLE_CUDNN /// Verifies CUTLASS against cudnn reference bool Conv2dOperationProfiler::verify_with_cudnn_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { auto &conv_desc = static_cast<library::ConvDescription const &>(operation->description()); // // Construct cudnn operators // CudnnCreate handle; cudnnStatus_t status = handle.get_cudnn_create_status(); if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Initialize state // // Initialize structure containing Conv2d arguments conv_workspace_.arguments.A = conv_workspace_.A->data(); conv_workspace_.arguments.B = conv_workspace_.B->data(); conv_workspace_.arguments.D = conv_workspace_.Reference->data(); conv_workspace_.arguments.alpha = problem_.alpha.data(); conv_workspace_.arguments.beta = problem_.beta.data(); conv_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // cuDNN does not support four tensor arguments, so we copy the tensor C data into // tensor D. 
conv_workspace_.Reference->copy_from_device(conv_workspace_.C->data()); conv_workspace_.arguments.C = conv_workspace_.arguments.D; try { // // Construct dispatcher to cudnn operator // detail::cudnnConvDispatcher conv_op( conv_desc, conv_workspace_.configuration, conv_workspace_.arguments, handle ); if (conv_op.status != Status::kSuccess) { if (conv_op.status == Status::kErrorNotSupported) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kNotSupported; } else { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } return true; } status = conv_op(handle); // Handle errors if (status != CUDNN_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUDNN] = get_cutlass_disposition(status); return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUDNN] = compare_tensors( options, *conv_workspace_.Computed, *conv_workspace_.Reference, conv_workspace_.Computed->batch_stride() ); // Save workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUDNN] == Disposition::kIncorrect) { save_workspace( device_context, options, conv_desc, library::Provider::kCUTLASS, library::Provider::kCUDNN); } } catch (...) { results_.back().verification_map[library::Provider::kCUDNN] = Disposition::kFailed; } // Return true means continue profiling return true; } #endif // #if CUTLASS_ENABLE_CUDNN ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
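// ---------------------------------------------------------------------------
// Illustrative sketch (standalone program, not part of the profiler above):
// the measurement pattern used by Conv2dOperationProfiler::profile_cutlass_
// -- an untimed warmup loop followed by an event-bracketed profiling loop
// whose elapsed time is divided by the iteration count. CUTLASS wraps the
// timing in its GpuTimer helper; the sketch below reproduces the same pattern
// with raw CUDA events and a placeholder kernel, so the kernel name, problem
// size, and iteration counts are assumptions for illustration only.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void placeholder_kernel(float *x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;   // stand-in for the conv2d operation
}

int main() {
  const int n = 1 << 20;
  const int warmup_iterations = 10;   // analogue of options.profiling.warmup_iterations
  const int iterations = 100;         // analogue of options.profiling.iterations
  float *x = nullptr;
  cudaMalloc(&x, n * sizeof(float));

  dim3 block(256), grid((n + block.x - 1) / block.x);

  // Warmup loop: executed but never timed, as in profile_cutlass_.
  for (int i = 0; i < warmup_iterations; ++i) {
    placeholder_kernel<<<grid, block>>>(x, n);
  }

  // Profiling loop bracketed by CUDA events (the GpuTimer analogue).
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i) {
    placeholder_kernel<<<grid, block>>>(x, n);
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);   // wait for completion before reading the timer

  float elapsed_ms = 0.f;
  cudaEventElapsedTime(&elapsed_ms, start, stop);
  printf("average runtime: %f ms\n", elapsed_ms / iterations);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(x);
  return 0;
}
// ---------------------------------------------------------------------------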
9a7aa1f56e078978b728226ae6c9632d01c0270b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Original code from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/causal_product_cuda.cu // Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ // Written by Angelos Katharopoulos <[email protected]>, // Apoorv Vyas <[email protected]> // // Modified to implement the fast weight LSTM V3 with FWM update rule*. // v3 = v2 + res. connection from feed-forward part of the pre-act update term. // Copyright (c) 2021 Kazuki Irie #include <torch/extension.h> // #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> // #include <iostream> typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor; // sigmoid __device__ float sgmf(float x) { return 1.f / (1.f + expf(-x)); } // Forward kernel for fast weight LSTM: // - coupled input-forget gate. // - no peephole connections. // - all activations are sigmoid to get positive recurrent queries. // Equations; for input z_t ... __global__ void fast_lstm_v4_forward_kernel( const float_accessor inputs_i, // input gate const float_accessor keys_i, const float_accessor values_i, const float_accessor betas_i, const float_accessor inputs_u, // update candidate const float_accessor keys_u, const float_accessor values_u, const float_accessor betas_u, const float_accessor inputs_o, // output gate const float_accessor keys_o, const float_accessor values_o, const float_accessor betas_o, float_accessor states, float_accessor cells, float_accessor kv_i, float_accessor kv_u, float_accessor kv_o, float_accessor result, float_accessor res_del_nmz, float_accessor res_cell, float_accessor gate_i, float_accessor update_u, float_accessor gate_o, float_accessor v_old_i, float_accessor v_old_u, float_accessor v_old_o, const int N, const int H, const int L, const int E, const int M, const int E_per_subblock, const int subblocks_per_seq, const int T, // block chunk size in time dim. const int l_offset // multiple of T, length offset. ) { // Each block takes care of one sequence. // blockIdx.x = n * H + h int n = blockIdx.x / H; // batch id int h = blockIdx.x % H; // head id // threadIdx.x = e_local*M + m // Local e coordinate within E_per_subblock sub-block. 
int e_local = threadIdx.x / M; int m = threadIdx.x % M; const int E_block = subblocks_per_seq * E_per_subblock; // Load the shared memory const int shared_kv_size = E_block * M; extern __shared__ float shared_mem[]; float* shared_kv_i = shared_mem; float* shared_kv_u = shared_kv_i + shared_kv_size; float* shared_kv_o = shared_kv_u + shared_kv_size; float* shared_states = shared_kv_o + shared_kv_size; float* shared_cells = shared_states + M; float* shared_gate_i = shared_cells + M; float* shared_update = shared_gate_i + M; float* shared_gate_o = shared_update + M; float* shared_v_old_i = shared_gate_o + M; float* shared_v_old_u = shared_v_old_i + M; float* shared_v_old_o = shared_v_old_u + M; float* shared_betas_i = shared_v_old_o + M; float* shared_betas_u = shared_betas_i + T; float* shared_betas_o = shared_betas_u + T; float* softmax_denom = shared_betas_o + T; float* max_value = softmax_denom + 1; float* shared_values_i = max_value + 1; // input gate float* shared_keys_i = shared_values_i + M*T; float* shared_inputs_i = shared_keys_i + E_block*T; float* shared_values_u = shared_inputs_i + M*T; // update candidate float* shared_keys_u = shared_values_u + M*T; float* shared_inputs_u = shared_keys_u + E_block*T; float* shared_values_o = shared_inputs_u + M*T; // output gate float* shared_keys_o = shared_values_o + M*T; float* shared_inputs_o = shared_keys_o + E_block*T; const float eps = 1e-6; if (threadIdx.x < M) { // m = threadIdx.x if threadIdx.x < M. // shared_results[m] = 0.f; shared_update[m] = 0.f; shared_gate_i[m] = 0.f; shared_gate_o[m] = 0.f; shared_v_old_i[m] = 0.f; shared_v_old_u[m] = 0.f; shared_v_old_o[m] = 0.f; } if (threadIdx.x < 1) { softmax_denom[0] = 0.f; max_value[0] = 0.f; } // the last segment is shorter. int t_end = (T + l_offset) <= L ? T : L - l_offset; for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x) { int t = int(i / M) + l_offset; int d = i % M; shared_values_i[i] = values_i[n][h][t][d]; shared_inputs_i[i] = inputs_i[n][h][t][d]; shared_values_u[i] = values_u[n][h][t][d]; shared_inputs_u[i] = inputs_u[n][h][t][d]; shared_values_o[i] = values_o[n][h][t][d]; shared_inputs_o[i] = inputs_o[n][h][t][d]; } for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x) { int t = int(i / E_block) + l_offset; int d = (i % E_block); if (d < E) { shared_keys_i[i] = keys_i[n][h][t][d]; shared_keys_u[i] = keys_u[n][h][t][d]; shared_keys_o[i] = keys_o[n][h][t][d]; } } for (int i = threadIdx.x; i < t_end; i += blockDim.x) { int t = i + l_offset; shared_betas_i[i] = betas_i[n][h][t][0]; shared_betas_u[i] = betas_u[n][h][t][0]; shared_betas_o[i] = betas_o[n][h][t][0]; } __syncthreads(); if (n >= N) { return; } int e; int kv_idx; for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { shared_kv_i[kv_idx] = kv_i[n][h][e][m]; shared_kv_u[kv_idx] = kv_u[n][h][e][m]; shared_kv_o[kv_idx] = kv_o[n][h][e][m]; } } // init variables if (threadIdx.x < M) { // initialize RNN state shared_states[m] = states[n][h][0][m]; shared_cells[m] = cells[n][h][0][m]; } int e_abs; float resi, resu, reso; float max_val, tmp_max; // float res_v_old_i, res_v_old_u, res_v_old_o; for (int t=0; t<t_end; t++) { // loop over time in the segment int l = t + l_offset; // absolute position in time int m_abs = t*M + m; // For stable softmax if (threadIdx.x < 1) { // Not parallelized! this should be improved! 
max_val = shared_states[0]; for (int i = 1; i < M; i++) { tmp_max = shared_states[i]; if (tmp_max > max_val) { max_val = tmp_max; } } max_value[0] = max_val; } __syncthreads(); // compute denominator for softmax if (threadIdx.x < M) { shared_states[m] = expf(shared_states[m] - max_value[0]); atomicAdd( &softmax_denom[0], shared_states[m] ); } __syncthreads(); for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // get old value float res_v_old_i = shared_kv_i[kv_idx] * shared_keys_i[e_abs]; atomicAdd( &shared_v_old_i[m], res_v_old_i ); float res_v_old_u = shared_kv_u[kv_idx] * shared_keys_u[e_abs]; atomicAdd( &shared_v_old_u[m], res_v_old_u ); float res_v_old_o = shared_kv_o[kv_idx] * shared_keys_o[e_abs]; atomicAdd( &shared_v_old_o[m], res_v_old_o ); } } __syncthreads(); // compute new value to be inserted float v_insert_i = shared_betas_i[t] * (shared_values_i[m_abs] - shared_v_old_i[m]); float v_insert_u = shared_betas_u[t] * (shared_values_u[m_abs] - shared_v_old_u[m]); float v_insert_o = shared_betas_o[t] * (shared_values_o[m_abs] - shared_v_old_o[m]); for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // Update all fast weights shared_kv_i[kv_idx] += shared_keys_i[e_abs] * v_insert_i; shared_kv_u[kv_idx] += shared_keys_u[e_abs] * v_insert_u; shared_kv_o[kv_idx] += shared_keys_o[e_abs] * v_insert_o; float soft_out = shared_states[e] / (softmax_denom[0] + eps); // Compute recurrent preactivation terms resi = soft_out * shared_kv_i[kv_idx]; atomicAdd( &shared_gate_i[m], resi ); resu = soft_out * shared_kv_u[kv_idx]; atomicAdd( &shared_update[m], resu ); reso = soft_out * shared_kv_o[kv_idx]; atomicAdd( &shared_gate_o[m], reso ); } } __syncthreads(); float out, new_cell; if (threadIdx.x < M) { // m = threadIdx.x if threadIdx.x < M // Ideally skip this for eval. Saving for bwd pass. float tmp = shared_states[m] / (softmax_denom[0] + eps); atomicAdd( &res_del_nmz[n][h][l][m], tmp ); // sigmoid shared_gate_i[m] = sgmf(shared_gate_i[m] + shared_inputs_i[m_abs]); // FINDME v4 shared_update[m] = shared_update[m] + shared_inputs_u[m_abs]; shared_gate_o[m] = sgmf(shared_gate_o[m] + shared_inputs_o[m_abs]); new_cell = shared_gate_i[m] * shared_update[m] + (1.f - shared_gate_i[m]) * shared_cells[m]; out = shared_gate_o[m] * new_cell; // out = expf(shared_gate_o[m] * new_cell); // atomicAdd( // &softmax_denom[0], // out // ); // write back intermediate results to be used for backward pass. 
atomicAdd( &result[n][h][l][m], out ); shared_states[m] = out; // state update atomicAdd( &res_cell[n][h][l][m], new_cell ); shared_cells[m] = new_cell; float out_i = shared_gate_i[m]; atomicAdd( &gate_i[n][h][l][m], out_i ); float out_u = shared_update[m]; atomicAdd( &update_u[n][h][l][m], out_u ); float out_o = shared_gate_o[m]; atomicAdd( &gate_o[n][h][l][m], out_o ); // initialize gates and update: shared_gate_i[m] = 0.f; shared_update[m] = 0.f; shared_gate_o[m] = 0.f; float r2i = shared_v_old_i[m]; atomicAdd( &v_old_i[n][h][l][m], r2i ); shared_v_old_i[m] = 0.f; float r2u = shared_v_old_u[m]; atomicAdd( &v_old_u[n][h][l][m], r2u ); shared_v_old_u[m] = 0.f; float r2o = shared_v_old_o[m]; atomicAdd( &v_old_o[n][h][l][m], r2o ); shared_v_old_o[m] = 0.f; } __syncthreads(); if (threadIdx.x < 1) { softmax_denom[0] = 0.f; } __syncthreads(); } __syncthreads(); // write back to kv to be carried over to the next segment. for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { kv_i[n][h][e][m] = shared_kv_i[kv_idx]; kv_u[n][h][e][m] = shared_kv_u[kv_idx]; kv_o[n][h][e][m] = shared_kv_o[kv_idx]; } } if (threadIdx.x < M) { states[n][h][0][m] = shared_states[m]; cells[n][h][0][m] = shared_cells[m]; } } // Forward void fast_lstm_v4_forward( const torch::Tensor inputs_i, // input gate const torch::Tensor keys_i, const torch::Tensor values_i, const torch::Tensor betas_i, const torch::Tensor inputs_u, // update const torch::Tensor keys_u, const torch::Tensor values_u, const torch::Tensor betas_u, const torch::Tensor inputs_o, // output gate const torch::Tensor keys_o, const torch::Tensor values_o, const torch::Tensor betas_o, torch::Tensor states, // init states torch::Tensor cells, // init cell states torch::Tensor kv_i, // might be non zero if carried over from previous seg torch::Tensor kv_u, torch::Tensor kv_o, torch::Tensor outputs, torch::Tensor nmz_delay, // softmax output delayed torch::Tensor cell_outs, torch::Tensor gate_i, torch::Tensor update_u, torch::Tensor gate_o, torch::Tensor v_old_i, torch::Tensor v_old_u, torch::Tensor v_old_o ) { // const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(queries)); torch::DeviceGuard _guard(inputs_i.device()); int N = inputs_i.size(0); int H = inputs_i.size(1); int L = inputs_i.size(2); int E = inputs_i.size(3); int M = values_i.size(3); // int threads = 1024; int threads = 512; // avoid edge cases. // Shared mem max size is 48KB int MUL_PER_BLOCK = min(threads, E*M); // make sure that MUL_PER_BLOCK is divisible by M; MUL_PER_BLOCK = int(MUL_PER_BLOCK / M) * M; threads = MUL_PER_BLOCK; const int subblocks_per_seq = ((E*M) + threads -1) / threads; const int E_per_subblock = MUL_PER_BLOCK / M; const int E_block = subblocks_per_seq * E_per_subblock; // int blocks = N*H*blocks_per_sequence; int blocks = N*H; // total number of sequences // 3 fast weight, 2 output/cells, 3 transforms, 3 for v_old, // 1 softmax denominator, +1 to store max for stable softmax. int shared_mem_const = (E_block * 3 + 5 + 3)*M + 1 + 1; // M for value, 2 * E for query and key. 
int shared_mem_per_time = 6*M + 3*E_block + 3; // Max shared memory size: // 12 * 1024 * 4 (float) = 49152 (48KB) // for Turing: 65536 (64KB) // for Volta: 98304 (96KB) int maxB; int device_id = 0; // int device_id = inputs_i.device(); // Should to be faster than `hipGetDeviceProperties` according to: https://developer.nvidia.com/blog/cuda-pro-tip-the-fast-way-to-query-device-properties/ hipDeviceGetAttribute(&maxB, hipDeviceAttributeSharedMemPerBlockOptin, device_id); // std::cout << "Max shared mem: " << maxB << std::endl; int maxF = maxB / sizeof(float); // Following is needed for sm > 48KB hipFuncSetAttribute(fast_lstm_v4_forward_kernel, hipFuncAttributeMaxDynamicSharedMemorySize, maxB); // hipDeviceProp_t prop; // hipGetDeviceProperties(&prop, 0); assert(maxF - shared_mem_const > 0 && "`d_head` too large. To obtain large models, keep `d_head` small" "e.g. 16 and increase the number of heads instead."); // std::cout << "Max shared mem: " << maxF * sizeof(float) << std::endl; // std::cout << "Shared mem const (float): " << // shared_mem_const * sizeof(float) << std::endl; // std::cout << "Remainder: " << maxF - shared_mem_const << std::endl; // std::cout << "Shared per time: " << shared_mem_per_time << std::endl; const int T = int((maxF - shared_mem_const) / shared_mem_per_time); const int shared_mem_forward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float); // std::cout << "Total used shared mem: " << shared_mem_forward << std::endl; for (int l_offset=0; l_offset < L; l_offset += T) { hipLaunchKernelGGL(( fast_lstm_v4_forward_kernel) , dim3(blocks), dim3(MUL_PER_BLOCK), shared_mem_forward, 0, inputs_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), inputs_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), inputs_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), states.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), cells.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), kv_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), kv_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), kv_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), nmz_delay.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), cell_outs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), update_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M, E_per_subblock, subblocks_per_seq, T, l_offset ); } } // Backward kernel, output gate __global__ void fast_lstm_v4_backward_kernel( const float_accessor keys_i, const float_accessor values_i, const float_accessor 
betas_i, const float_accessor keys_u, const float_accessor values_u, const float_accessor betas_u, const float_accessor keys_o, const float_accessor values_o, const float_accessor betas_o, const float_accessor v_out_i, const float_accessor v_out_u, const float_accessor v_out_o, const float_accessor rnn_out, const float_accessor rnn_out_delayed, const float_accessor cell_out, const float_accessor u_minus_c, const float_accessor grad_out, const float_accessor gate_i, const float_accessor update_u, const float_accessor gate_o, float_accessor grad_h, // output tmp grad float_accessor grad_c, // cell tmp grad float_accessor kv_i, // kv memory from the forward pass float_accessor kv_u, float_accessor kv_o, float_accessor grad_kv_i, // kv temporal grad float_accessor grad_kv_u, float_accessor grad_kv_o, float_accessor grad_inputs_i, // input gate float_accessor grad_keys_i, float_accessor grad_values_i, float_accessor grad_betas_i, float_accessor grad_inputs_u, // update float_accessor grad_keys_u, float_accessor grad_values_u, float_accessor grad_betas_u, float_accessor grad_inputs_o, // output gate float_accessor grad_keys_o, float_accessor grad_values_o, float_accessor grad_betas_o, int N, int H, int L, int E, int M, int E_per_subblock, int subblocks_per_seq, int T, int l_offset ) { // Each block takes care of one sequence. // blockIdx.x = n * H + h int n = blockIdx.x / H; int h = blockIdx.x % H; // threadIdx.x = e_local*M + m // Local e coordinate within E_per_subblock sub-block. int e_local = threadIdx.x / M; int m = threadIdx.x % M; const int E_block = subblocks_per_seq * E_per_subblock; // Load the shared memory for KV const int shared_kv_size = E_block * M; extern __shared__ float shared_mem[]; float* shared_kv_i = shared_mem; float* shared_grad_kv_i = shared_mem + shared_kv_size; float* shared_kv_u = shared_grad_kv_i + shared_kv_size; float* shared_grad_kv_u = shared_kv_u + shared_kv_size; float* shared_kv_o = shared_grad_kv_u + shared_kv_size; float* shared_grad_kv_o = shared_kv_o + shared_kv_size; float* shared_res_zi = shared_grad_kv_o + shared_kv_size; float* shared_res_zu = shared_res_zi + M; float* shared_res_zo = shared_res_zu + M; float* shared_res_k_i = shared_res_zo + M; float* shared_res_k_u = shared_res_k_i + M; float* shared_res_k_o = shared_res_k_u + M; float* shared_res_v_i = shared_res_k_o + M; float* shared_res_v_u = shared_res_v_i + M; float* shared_res_v_o = shared_res_v_u + M; float* shared_grad_v_old_i = shared_res_v_o + M; float* shared_grad_v_old_u = shared_grad_v_old_i + M; float* shared_grad_v_old_o = shared_grad_v_old_u + M; float* shared_res_beta_i = shared_grad_v_old_o + M; float* shared_res_beta_u = shared_res_beta_i + 1; float* shared_res_beta_o = shared_res_beta_u + 1; float* grad_sft_cst = shared_res_beta_o + 1; float* shared_gradout = grad_sft_cst + 1; float* shared_keys_i = shared_gradout + M*T; float* shared_values_i = shared_keys_i + E_block*T; float* shared_keys_u = shared_values_i + M*T; float* shared_values_u = shared_keys_u + E_block*T; float* shared_keys_o = shared_values_u + M*T; float* shared_values_o = shared_keys_o + E_block*T; float* shared_rnn_out = shared_values_o + M*T; float* shared_rnn_out_delayed = shared_rnn_out + M*T; float* shared_c = shared_rnn_out_delayed + M*T; float* shared_u_m_c = shared_c + M*T; float* shared_gate_i = shared_u_m_c + M*T; float* shared_update = shared_gate_i + M*T; float* shared_gate_o = shared_update + M*T; float* shared_v_old_i = shared_gate_o + M*T; float* shared_v_old_u = shared_v_old_i + M*T; float* 
shared_v_old_o = shared_v_old_u + M*T; float* shared_betas_i = shared_v_old_o + M*T; float* shared_betas_u = shared_betas_i + T; float* shared_betas_o = shared_betas_u + T; float* shared_grad_h = shared_betas_o + T; float* shared_grad_c = shared_grad_h + M*T; if (threadIdx.x < M) { shared_res_zi[m] = 0.f; shared_res_zu[m] = 0.f; shared_res_zo[m] = 0.f; shared_res_k_i[m] = 0.f; shared_res_k_u[m] = 0.f; shared_res_k_o[m] = 0.f; shared_res_v_i[m] = 0.f; shared_res_v_u[m] = 0.f; shared_res_v_o[m] = 0.f; shared_grad_v_old_i[m] = 0.f; shared_grad_v_old_u[m] = 0.f; shared_grad_v_old_o[m] = 0.f; } if (threadIdx.x < 1) { shared_res_beta_i[0] = 0.f; shared_res_beta_u[0] = 0.f; shared_res_beta_o[0] = 0.f; grad_sft_cst[0] = 0.f; // offset for grad softmax } // Everythig goes backward int t_end = (T + l_offset) <= L ? T : L - l_offset; for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x) { int t = int(i / M) + l_offset; int t_bw = L - 1 - t; int d = i % M; shared_gradout[i] = grad_out[n][h][t_bw][d]; shared_rnn_out[i] = rnn_out[n][h][t_bw][d]; shared_c[i] = cell_out[n][h][t_bw][d]; shared_u_m_c[i] = u_minus_c[n][h][t_bw][d]; shared_values_i[i] = values_i[n][h][t_bw][d]; shared_values_u[i] = values_u[n][h][t_bw][d]; shared_values_o[i] = values_o[n][h][t_bw][d]; shared_v_old_i[i] = v_out_i[n][h][t_bw][d]; shared_v_old_u[i] = v_out_u[n][h][t_bw][d]; shared_v_old_o[i] = v_out_o[n][h][t_bw][d]; shared_gate_i[i] = gate_i[n][h][t_bw][d]; shared_update[i] = update_u[n][h][t_bw][d]; shared_gate_o[i] = gate_o[n][h][t_bw][d]; } for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x) { int t = int(i / E_block) + l_offset; int t_bw = L - 1 - t; int d = (i % E_block); if (d < E) { shared_rnn_out_delayed[i] = rnn_out_delayed[n][h][t_bw][d]; shared_keys_i[i] = keys_i[n][h][t_bw][d]; shared_keys_u[i] = keys_u[n][h][t_bw][d]; shared_keys_o[i] = keys_o[n][h][t_bw][d]; } } for (int i = threadIdx.x; i < t_end; i += blockDim.x) { int t = i + l_offset; int t_bw = L - 1 - t; shared_betas_i[i] = betas_i[n][h][t_bw][0]; shared_betas_u[i] = betas_u[n][h][t_bw][0]; shared_betas_o[i] = betas_o[n][h][t_bw][0]; } __syncthreads(); if (n >= N) { return; } int e; int e_abs; // absolute idx from t=0 int kv_idx; for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { shared_kv_i[kv_idx] = kv_i[n][h][e][m]; shared_grad_kv_i[kv_idx] = grad_kv_i[n][h][e][m]; shared_kv_u[kv_idx] = kv_u[n][h][e][m]; shared_grad_kv_u[kv_idx] = grad_kv_u[n][h][e][m]; shared_kv_o[kv_idx] = kv_o[n][h][e][m]; shared_grad_kv_o[kv_idx] = grad_kv_o[n][h][e][m]; } } if (threadIdx.x < M) { // threadIdx.x = m if threadIdx.x < M shared_grad_h[m] = grad_h[n][h][0][m]; shared_grad_c[m] = grad_c[n][h][0][m]; } for (int t=0; t<t_end; t++) { int l = t + l_offset; int l_b = L - l -1; int m_abs = t*M + m; if (threadIdx.x < M) { // element-wise ops only here // threadIdx.x = m if threadIdx.x < M shared_grad_h[m] += shared_gradout[m_abs]; // float grad_soft_input = // shared_rnn_out[m_abs] * (shared_grad_h[m] - grad_sft_cst[0]); // for output gate float grad_o = shared_c[m_abs] * shared_grad_h[m]; shared_res_zo[m] = grad_o * (1.f - shared_gate_o[m_abs]) * shared_gate_o[m_abs]; // grad c, no sigmoid shared_grad_c[m] += shared_gate_o[m_abs] * shared_grad_h[m]; // shared_grad_c[m] += shared_gate_o[m_abs] * shared_grad_h[m] // * sgmf(shared_c[m_abs]) * (1.f - sgmf(shared_c[m_abs])); shared_grad_h[m] = 0.f; // prepare grad h for the next step. 
} __syncthreads(); // important to sync float v_diff_i = shared_values_i[m_abs] - shared_v_old_i[m_abs]; float v_ins_i = v_diff_i * shared_betas_i[t]; float v_diff_u = shared_values_u[m_abs] - shared_v_old_u[m_abs]; float v_ins_u = v_diff_u * shared_betas_u[t]; float v_diff_o = shared_values_o[m_abs] - shared_v_old_o[m_abs]; float v_ins_o = v_diff_o * shared_betas_o[t]; // Output gate for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // grad rec weight shared_grad_kv_o[kv_idx] += shared_res_zo[m] * shared_rnn_out_delayed[e_abs]; // grad v float res_v_o = shared_grad_kv_o[kv_idx] * shared_keys_o[e_abs] * shared_betas_o[t]; atomicAdd( &shared_res_v_o[m], res_v_o ); // grad k part 1 and 2 float res_k_o = shared_grad_kv_o[kv_idx] * v_ins_o; atomicAdd( &shared_res_k_o[e], res_k_o ); // grad beta float res_b_o = shared_grad_kv_o[kv_idx] * shared_keys_o[e_abs] * v_diff_o; atomicAdd( &shared_res_beta_o[0], res_b_o ); // pass grad for the next time step. float res_h_o = shared_res_zo[m] * shared_kv_o[kv_idx]; atomicAdd( &shared_grad_h[e], res_h_o ); // contribution from output gate } } __syncthreads(); if (threadIdx.x < M) { // grad input gate float grad_i = shared_grad_c[m] * shared_u_m_c[m_abs]; shared_res_zi[m] = grad_i * (1.f - shared_gate_i[m_abs]) * shared_gate_i[m_abs]; // grad update shared_res_zu[m] = shared_grad_c[m] * shared_gate_i[m_abs]; // prepare grad c for the next time step shared_grad_c[m] = shared_grad_c[m] * (1.f - shared_gate_i[m_abs]); } __syncthreads(); // important to sync // Grad for input gate and update transformation for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // grad rec weight shared_grad_kv_i[kv_idx] += shared_res_zi[m] * shared_rnn_out_delayed[e_abs]; shared_grad_kv_u[kv_idx] += shared_res_zu[m] * shared_rnn_out_delayed[e_abs]; // grad v float res_v_i = shared_grad_kv_i[kv_idx] * shared_keys_i[e_abs] * shared_betas_i[t]; atomicAdd( &shared_res_v_i[m], res_v_i ); float res_v_u = shared_grad_kv_u[kv_idx] * shared_keys_u[e_abs] * shared_betas_u[t]; atomicAdd( &shared_res_v_u[m], res_v_u ); // grad k float res_k_i = shared_grad_kv_i[kv_idx] * v_ins_i; atomicAdd( &shared_res_k_i[e], res_k_i ); float res_k_u = shared_grad_kv_u[kv_idx] * v_ins_u; atomicAdd( &shared_res_k_u[e], res_k_u ); // grad beta float res_b_i = shared_grad_kv_i[kv_idx] * shared_keys_i[e_abs] * v_diff_i; atomicAdd( &shared_res_beta_i[0], res_b_i ); float res_b_u = shared_grad_kv_u[kv_idx] * shared_keys_u[e_abs] * v_diff_u; atomicAdd( &shared_res_beta_u[0], res_b_u ); // pass gradients to the next time step float res_h_i = shared_res_zi[m] * shared_kv_i[kv_idx]; atomicAdd( &shared_grad_h[e], res_h_i ); // contribution from input gate float res_h_u = shared_res_zu[m] * shared_kv_u[kv_idx]; atomicAdd( &shared_grad_h[e], res_h_u ); // contribution from update transformation } } __syncthreads(); // compute constant for grad softmax if (threadIdx.x < M) { float cst = shared_grad_h[m] * shared_rnn_out_delayed[m_abs]; atomicAdd( &grad_sft_cst[0], cst ); } __syncthreads(); if (threadIdx.x < M) { shared_grad_h[m] = shared_rnn_out_delayed[m_abs] * (shared_grad_h[m] - grad_sft_cst[0]); } if (threadIdx.x < 1) { grad_sft_cst[0] = 0.f; } for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) 
{ // reverse update fast weight shared_kv_i[kv_idx] -= shared_keys_i[e_abs] * v_ins_i; shared_kv_u[kv_idx] -= shared_keys_u[e_abs] * v_ins_u; shared_kv_o[kv_idx] -= shared_keys_o[e_abs] * v_ins_o; // grad v_old float res_v_old_i = - (shared_grad_kv_i[kv_idx] * shared_betas_i[t] * shared_keys_i[e_abs]); atomicAdd( &shared_grad_v_old_i[m], res_v_old_i ); float res_v_old_u = - (shared_grad_kv_u[kv_idx] * shared_betas_u[t] * shared_keys_u[e_abs]); atomicAdd( &shared_grad_v_old_u[m], res_v_old_u ); float res_v_old_o = - (shared_grad_kv_o[kv_idx] * shared_betas_o[t] * shared_keys_o[e_abs]); atomicAdd( &shared_grad_v_old_o[m], res_v_old_o ); } } __syncthreads(); // remaining key grad for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // Input gate float res_kp3_i = shared_grad_v_old_i[m] * shared_kv_i[kv_idx]; atomicAdd( &shared_res_k_i[e], res_kp3_i ); // remaining key grad // grad kv via v old shared_grad_kv_i[kv_idx] += shared_grad_v_old_i[m] * shared_keys_i[e_abs]; // Update transform float res_kp3_u = shared_grad_v_old_u[m] * shared_kv_u[kv_idx]; atomicAdd( &shared_res_k_u[e], res_kp3_u ); // remaining key grad // grad kv via v old shared_grad_kv_u[kv_idx] += shared_grad_v_old_u[m] * shared_keys_u[e_abs]; // Output gate float res_kp3_o = shared_grad_v_old_o[m] * shared_kv_o[kv_idx]; atomicAdd( &shared_res_k_o[e], res_kp3_o ); // remaining key grad // grad kv via v old shared_grad_kv_o[kv_idx] += shared_grad_v_old_o[m] * shared_keys_o[e_abs]; } } __syncthreads(); if (threadIdx.x < M) { // m = threadIdx.x if threadIdx.x < M // feed-forward part float rzi = shared_res_zi[m]; atomicAdd( &grad_inputs_i[n][h][l_b][m], rzi ); float rzu = shared_res_zu[m]; atomicAdd( &grad_inputs_u[n][h][l_b][m], rzu ); float rzo = shared_res_zo[m]; atomicAdd( &grad_inputs_o[n][h][l_b][m], rzo ); // keys float rki = shared_res_k_i[m]; atomicAdd( &grad_keys_i[n][h][l_b][m], rki ); float rku = shared_res_k_u[m]; atomicAdd( &grad_keys_u[n][h][l_b][m], rku ); float rko = shared_res_k_o[m]; atomicAdd( &grad_keys_o[n][h][l_b][m], rko ); // values float rvi = shared_res_v_i[m]; atomicAdd( &grad_values_i[n][h][l_b][m], rvi ); float rvu = shared_res_v_u[m]; atomicAdd( &grad_values_u[n][h][l_b][m], rvu ); float rvo = shared_res_v_o[m]; atomicAdd( &grad_values_o[n][h][l_b][m], rvo ); // reset shared_res_k_i[m] = 0.f; shared_res_k_u[m] = 0.f; shared_res_k_o[m] = 0.f; shared_res_v_i[m] = 0.f; shared_res_v_u[m] = 0.f; shared_res_v_o[m] = 0.f; shared_grad_v_old_i[m] = 0.f; shared_grad_v_old_u[m] = 0.f; shared_grad_v_old_o[m] = 0.f; } __syncthreads(); if (threadIdx.x < 1) { // input atomicAdd( &grad_betas_i[n][h][l_b][0], shared_res_beta_i[0] ); shared_res_beta_i[0] = 0.f; // update atomicAdd( &grad_betas_u[n][h][l_b][0], shared_res_beta_u[0] ); shared_res_beta_u[0] = 0.f; // output gate atomicAdd( &grad_betas_o[n][h][l_b][0], shared_res_beta_o[0] ); shared_res_beta_o[0] = 0.f; } __syncthreads(); } __syncthreads(); // write back temporal gradients. 
for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { kv_i[n][h][e][m] = shared_kv_i[kv_idx]; grad_kv_i[n][h][e][m] = shared_grad_kv_i[kv_idx]; kv_u[n][h][e][m] = shared_kv_u[kv_idx]; grad_kv_u[n][h][e][m] = shared_grad_kv_u[kv_idx]; kv_o[n][h][e][m] = shared_kv_o[kv_idx]; grad_kv_o[n][h][e][m] = shared_grad_kv_o[kv_idx]; } } if (threadIdx.x < M) { // threadIdx.x = m if threadIdx.x < M grad_h[n][h][0][m] = shared_grad_h[m]; grad_c[n][h][0][m] = shared_grad_c[m]; } } // Backward pass // This is very shared_mem intensive for the standard LSTM... void fast_lstm_v4_backward( const torch::Tensor grad_out, const torch::Tensor keys_i, const torch::Tensor values_i, const torch::Tensor betas_i, const torch::Tensor keys_u, const torch::Tensor values_u, const torch::Tensor betas_u, const torch::Tensor keys_o, const torch::Tensor values_o, const torch::Tensor betas_o, const torch::Tensor v_old_i, const torch::Tensor v_old_u, const torch::Tensor v_old_o, const torch::Tensor outputs, const torch::Tensor o_delayed, const torch::Tensor cell_out, const torch::Tensor u_minus_c, const torch::Tensor gate_i, const torch::Tensor update_u, const torch::Tensor gate_o, torch::Tensor fw_mem_i, // from the forward pass. torch::Tensor fw_mem_u, torch::Tensor fw_mem_o, torch::Tensor grad_in_i, // input gate torch::Tensor grad_ki, torch::Tensor grad_vi, torch::Tensor grad_bi, torch::Tensor grad_in_u, // update torch::Tensor grad_ku, torch::Tensor grad_vu, torch::Tensor grad_bu, torch::Tensor grad_in_o, // output gate torch::Tensor grad_ko, torch::Tensor grad_vo, torch::Tensor grad_bo ) { // const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(grad_queries)); torch::DeviceGuard _guard(grad_out.device()); int N = keys_i.size(0); int H = keys_i.size(1); int L = keys_i.size(2); int E = keys_i.size(3); int M = values_i.size(3); auto grad_kv_i = torch::zeros({N, H, E, M}, keys_i.options()); auto grad_kv_u = torch::zeros({N, H, E, M}, keys_i.options()); auto grad_kv_o = torch::zeros({N, H, E, M}, keys_i.options()); auto grad_h = torch::zeros({N, H, 1, M}, keys_i.options()); auto grad_c = torch::zeros({N, H, 1, M}, keys_i.options()); // const int threads = 1024; const int threads = 512; // avoid edge cases. // Gradient output gate ==================================== int MPB = min(threads, E*M); // make sure that MUL_PER_BLOCK is divisible by M; MPB = int(MPB / M) * M; const int subblocks_per_seq_value = ((E*M) + MPB - 1)/ MPB; const int E_per_subblock = MPB / M; const int blocks_value = N*H; const int E_block = E_per_subblock * subblocks_per_seq_value; // see kernel int shared_mem_const = (6 * E_block + 9 + 3)*M + 4; int shared_mem_per_time = (12 + 3) * M + 3 * E_block + 3; // Max shared memory size: // 12 * 1024 * 4 (float) = 49152 (48KB) // for Turing: 65536 (64KB) // for Volta: 98304 (96KB) int maxB; int device_id = 0; // assume all devices to be the same type as device 0. 
// int device_id = keys_i.device(); // Should to be faster than `hipGetDeviceProperties` according to: https://developer.nvidia.com/blog/cuda-pro-tip-the-fast-way-to-query-device-properties/ hipDeviceGetAttribute(&maxB, hipDeviceAttributeSharedMemPerBlockOptin, device_id); // std::cout << "Max shared mem: " << maxB << std::endl; int maxF = maxB / sizeof(float); // Following is needed for sm > 48KB hipFuncSetAttribute(fast_lstm_v4_backward_kernel, hipFuncAttributeMaxDynamicSharedMemorySize, maxB); // hipDeviceProp_t prop; // hipGetDeviceProperties(&prop, 0); assert(maxF - shared_mem_const > 0 && "`d_head` too large. To obtain large models, keep `d_head` small" "e.g. 16 and increase the number of heads instead."); // std::cout << "Max shared mem: " << maxF * sizeof(float) << std::endl; // std::cout << "Shared mem const (float): " << // shared_mem_const * sizeof(float) << std::endl; // std::cout << "Remainder: " << maxF - shared_mem_const << std::endl; // std::cout << "Shared per time: " << shared_mem_per_time << std::endl; const int T = int((maxF - shared_mem_const) / shared_mem_per_time); const int shared_mem_backward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float); for (int l_offset=0; l_offset < L; l_offset += T) { hipLaunchKernelGGL(( fast_lstm_v4_backward_kernel) , dim3(blocks_value), dim3(MPB), shared_mem_backward, 0, keys_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), o_delayed.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), cell_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), u_minus_c.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), update_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_h.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_c.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), fw_mem_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), fw_mem_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), fw_mem_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_kv_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_kv_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_kv_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_in_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_ki.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_vi.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_bi.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_in_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_ku.packed_accessor32<float, 
4, torch::RestrictPtrTraits>(), grad_vu.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_bu.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_in_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_ko.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_vo.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_bo.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M, E_per_subblock, subblocks_per_seq_value, T, l_offset ); } } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "fast_lstm_v4_forward", &fast_lstm_v4_forward, "Compute the weighted sum of values but attending only to previous " "values." ); m.def( "fast_lstm_v4_backward", &fast_lstm_v4_backward, "Compute the gradients for the fast weight memory." ); }
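// ---------------------------------------------------------------------------
// Illustrative sketch (standalone program in CUDA syntax; not part of the
// extension above, whose HIP and CUDA versions share this logic): the dynamic
// shared-memory budgeting used by the forward/backward host wrappers. The
// wrappers query the opt-in per-block shared-memory limit, raise the kernel's
// dynamic shared-memory cap, derive the time-chunk size T from a constant
// footprint plus a per-timestep footprint, and launch the kernel once per
// chunk of T timesteps. The footprint formulas below are copied from
// fast_lstm_v4_forward; the kernel body, E_block/M/L values, and launch shape
// are placeholders.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void chunked_kernel(int T) {
  extern __shared__ float shared_mem[];
  if (threadIdx.x == 0) shared_mem[0] = static_cast<float>(T);  // placeholder work
}

int main() {
  int device_id = 0;
  int max_shared_bytes = 0;
  cudaDeviceGetAttribute(&max_shared_bytes,
                         cudaDevAttrMaxSharedMemoryPerBlockOptin, device_id);

  // Opt in to more than the default 48KB of dynamic shared memory per block.
  cudaFuncSetAttribute(chunked_kernel,
                       cudaFuncAttributeMaxDynamicSharedMemorySize,
                       max_shared_bytes);

  // Placeholder sizes; in the extension these come from the input tensors.
  const int E_block = 32, M = 32, L = 256;
  const int max_floats = max_shared_bytes / (int)sizeof(float);
  const int shared_mem_const = (E_block * 3 + 5 + 3) * M + 1 + 1;  // per sequence
  const int shared_mem_per_time = 6 * M + 3 * E_block + 3;         // per timestep
  const int T = (max_floats - shared_mem_const) / shared_mem_per_time;
  const size_t smem_bytes =
      (size_t)(T * shared_mem_per_time + shared_mem_const) * sizeof(float);

  // One launch per chunk of T timesteps; the real kernels carry state between
  // chunks through global memory (kv_*, states, cells).
  for (int l_offset = 0; l_offset < L; l_offset += T) {
    chunked_kernel<<<1, 256, smem_bytes>>>(T);
  }
  cudaDeviceSynchronize();
  printf("opt-in shared memory: %d bytes, time chunk T = %d\n",
         max_shared_bytes, T);
  return 0;
}
// ---------------------------------------------------------------------------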
9a7aa1f56e078978b728226ae6c9632d01c0270b.cu
// Original code from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/causal_product_cuda.cu // Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/ // Written by Angelos Katharopoulos <[email protected]>, // Apoorv Vyas <[email protected]> // // Modified to implement the fast weight LSTM V3 with FWM update rule*. // v3 = v2 + res. connection from feed-forward part of the pre-act update term. // Copyright (c) 2021 Kazuki Irie #include <torch/extension.h> // #include <c10/cuda/CUDAGuard.h> // #include <iostream> typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor; // sigmoid __device__ float sgmf(float x) { return 1.f / (1.f + expf(-x)); } // Forward kernel for fast weight LSTM: // - coupled input-forget gate. // - no peephole connections. // - all activations are sigmoid to get positive recurrent queries. // Equations; for input z_t ... __global__ void fast_lstm_v4_forward_kernel( const float_accessor inputs_i, // input gate const float_accessor keys_i, const float_accessor values_i, const float_accessor betas_i, const float_accessor inputs_u, // update candidate const float_accessor keys_u, const float_accessor values_u, const float_accessor betas_u, const float_accessor inputs_o, // output gate const float_accessor keys_o, const float_accessor values_o, const float_accessor betas_o, float_accessor states, float_accessor cells, float_accessor kv_i, float_accessor kv_u, float_accessor kv_o, float_accessor result, float_accessor res_del_nmz, float_accessor res_cell, float_accessor gate_i, float_accessor update_u, float_accessor gate_o, float_accessor v_old_i, float_accessor v_old_u, float_accessor v_old_o, const int N, const int H, const int L, const int E, const int M, const int E_per_subblock, const int subblocks_per_seq, const int T, // block chunk size in time dim. const int l_offset // multiple of T, length offset. ) { // Each block takes care of one sequence. // blockIdx.x = n * H + h int n = blockIdx.x / H; // batch id int h = blockIdx.x % H; // head id // threadIdx.x = e_local*M + m // Local e coordinate within E_per_subblock sub-block. 
int e_local = threadIdx.x / M; int m = threadIdx.x % M; const int E_block = subblocks_per_seq * E_per_subblock; // Load the shared memory const int shared_kv_size = E_block * M; extern __shared__ float shared_mem[]; float* shared_kv_i = shared_mem; float* shared_kv_u = shared_kv_i + shared_kv_size; float* shared_kv_o = shared_kv_u + shared_kv_size; float* shared_states = shared_kv_o + shared_kv_size; float* shared_cells = shared_states + M; float* shared_gate_i = shared_cells + M; float* shared_update = shared_gate_i + M; float* shared_gate_o = shared_update + M; float* shared_v_old_i = shared_gate_o + M; float* shared_v_old_u = shared_v_old_i + M; float* shared_v_old_o = shared_v_old_u + M; float* shared_betas_i = shared_v_old_o + M; float* shared_betas_u = shared_betas_i + T; float* shared_betas_o = shared_betas_u + T; float* softmax_denom = shared_betas_o + T; float* max_value = softmax_denom + 1; float* shared_values_i = max_value + 1; // input gate float* shared_keys_i = shared_values_i + M*T; float* shared_inputs_i = shared_keys_i + E_block*T; float* shared_values_u = shared_inputs_i + M*T; // update candidate float* shared_keys_u = shared_values_u + M*T; float* shared_inputs_u = shared_keys_u + E_block*T; float* shared_values_o = shared_inputs_u + M*T; // output gate float* shared_keys_o = shared_values_o + M*T; float* shared_inputs_o = shared_keys_o + E_block*T; const float eps = 1e-6; if (threadIdx.x < M) { // m = threadIdx.x if threadIdx.x < M. // shared_results[m] = 0.f; shared_update[m] = 0.f; shared_gate_i[m] = 0.f; shared_gate_o[m] = 0.f; shared_v_old_i[m] = 0.f; shared_v_old_u[m] = 0.f; shared_v_old_o[m] = 0.f; } if (threadIdx.x < 1) { softmax_denom[0] = 0.f; max_value[0] = 0.f; } // the last segment is shorter. int t_end = (T + l_offset) <= L ? T : L - l_offset; for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x) { int t = int(i / M) + l_offset; int d = i % M; shared_values_i[i] = values_i[n][h][t][d]; shared_inputs_i[i] = inputs_i[n][h][t][d]; shared_values_u[i] = values_u[n][h][t][d]; shared_inputs_u[i] = inputs_u[n][h][t][d]; shared_values_o[i] = values_o[n][h][t][d]; shared_inputs_o[i] = inputs_o[n][h][t][d]; } for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x) { int t = int(i / E_block) + l_offset; int d = (i % E_block); if (d < E) { shared_keys_i[i] = keys_i[n][h][t][d]; shared_keys_u[i] = keys_u[n][h][t][d]; shared_keys_o[i] = keys_o[n][h][t][d]; } } for (int i = threadIdx.x; i < t_end; i += blockDim.x) { int t = i + l_offset; shared_betas_i[i] = betas_i[n][h][t][0]; shared_betas_u[i] = betas_u[n][h][t][0]; shared_betas_o[i] = betas_o[n][h][t][0]; } __syncthreads(); if (n >= N) { return; } int e; int kv_idx; for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { shared_kv_i[kv_idx] = kv_i[n][h][e][m]; shared_kv_u[kv_idx] = kv_u[n][h][e][m]; shared_kv_o[kv_idx] = kv_o[n][h][e][m]; } } // init variables if (threadIdx.x < M) { // initialize RNN state shared_states[m] = states[n][h][0][m]; shared_cells[m] = cells[n][h][0][m]; } int e_abs; float resi, resu, reso; float max_val, tmp_max; // float res_v_old_i, res_v_old_u, res_v_old_o; for (int t=0; t<t_end; t++) { // loop over time in the segment int l = t + l_offset; // absolute position in time int m_abs = t*M + m; // For stable softmax if (threadIdx.x < 1) { // Not parallelized! this should be improved! 
max_val = shared_states[0]; for (int i = 1; i < M; i++) { tmp_max = shared_states[i]; if (tmp_max > max_val) { max_val = tmp_max; } } max_value[0] = max_val; } __syncthreads(); // compute denominator for softmax if (threadIdx.x < M) { shared_states[m] = expf(shared_states[m] - max_value[0]); atomicAdd( &softmax_denom[0], shared_states[m] ); } __syncthreads(); for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // get old value float res_v_old_i = shared_kv_i[kv_idx] * shared_keys_i[e_abs]; atomicAdd( &shared_v_old_i[m], res_v_old_i ); float res_v_old_u = shared_kv_u[kv_idx] * shared_keys_u[e_abs]; atomicAdd( &shared_v_old_u[m], res_v_old_u ); float res_v_old_o = shared_kv_o[kv_idx] * shared_keys_o[e_abs]; atomicAdd( &shared_v_old_o[m], res_v_old_o ); } } __syncthreads(); // compute new value to be inserted float v_insert_i = shared_betas_i[t] * (shared_values_i[m_abs] - shared_v_old_i[m]); float v_insert_u = shared_betas_u[t] * (shared_values_u[m_abs] - shared_v_old_u[m]); float v_insert_o = shared_betas_o[t] * (shared_values_o[m_abs] - shared_v_old_o[m]); for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // Update all fast weights shared_kv_i[kv_idx] += shared_keys_i[e_abs] * v_insert_i; shared_kv_u[kv_idx] += shared_keys_u[e_abs] * v_insert_u; shared_kv_o[kv_idx] += shared_keys_o[e_abs] * v_insert_o; float soft_out = shared_states[e] / (softmax_denom[0] + eps); // Compute recurrent preactivation terms resi = soft_out * shared_kv_i[kv_idx]; atomicAdd( &shared_gate_i[m], resi ); resu = soft_out * shared_kv_u[kv_idx]; atomicAdd( &shared_update[m], resu ); reso = soft_out * shared_kv_o[kv_idx]; atomicAdd( &shared_gate_o[m], reso ); } } __syncthreads(); float out, new_cell; if (threadIdx.x < M) { // m = threadIdx.x if threadIdx.x < M // Ideally skip this for eval. Saving for bwd pass. float tmp = shared_states[m] / (softmax_denom[0] + eps); atomicAdd( &res_del_nmz[n][h][l][m], tmp ); // sigmoid shared_gate_i[m] = sgmf(shared_gate_i[m] + shared_inputs_i[m_abs]); // FINDME v4 shared_update[m] = shared_update[m] + shared_inputs_u[m_abs]; shared_gate_o[m] = sgmf(shared_gate_o[m] + shared_inputs_o[m_abs]); new_cell = shared_gate_i[m] * shared_update[m] + (1.f - shared_gate_i[m]) * shared_cells[m]; out = shared_gate_o[m] * new_cell; // out = expf(shared_gate_o[m] * new_cell); // atomicAdd( // &softmax_denom[0], // out // ); // write back intermediate results to be used for backward pass. 
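        // Written back per step: the hidden state (result), the cell state,
        // the normalized recurrent query (res_del_nmz), the gate activations
        // and update candidate, and the retrieved v_old vectors; the backward
        // kernel re-reads these instead of recomputing them.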
atomicAdd( &result[n][h][l][m], out ); shared_states[m] = out; // state update atomicAdd( &res_cell[n][h][l][m], new_cell ); shared_cells[m] = new_cell; float out_i = shared_gate_i[m]; atomicAdd( &gate_i[n][h][l][m], out_i ); float out_u = shared_update[m]; atomicAdd( &update_u[n][h][l][m], out_u ); float out_o = shared_gate_o[m]; atomicAdd( &gate_o[n][h][l][m], out_o ); // initialize gates and update: shared_gate_i[m] = 0.f; shared_update[m] = 0.f; shared_gate_o[m] = 0.f; float r2i = shared_v_old_i[m]; atomicAdd( &v_old_i[n][h][l][m], r2i ); shared_v_old_i[m] = 0.f; float r2u = shared_v_old_u[m]; atomicAdd( &v_old_u[n][h][l][m], r2u ); shared_v_old_u[m] = 0.f; float r2o = shared_v_old_o[m]; atomicAdd( &v_old_o[n][h][l][m], r2o ); shared_v_old_o[m] = 0.f; } __syncthreads(); if (threadIdx.x < 1) { softmax_denom[0] = 0.f; } __syncthreads(); } __syncthreads(); // write back to kv to be carried over to the next segment. for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { kv_i[n][h][e][m] = shared_kv_i[kv_idx]; kv_u[n][h][e][m] = shared_kv_u[kv_idx]; kv_o[n][h][e][m] = shared_kv_o[kv_idx]; } } if (threadIdx.x < M) { states[n][h][0][m] = shared_states[m]; cells[n][h][0][m] = shared_cells[m]; } } // Forward void fast_lstm_v4_forward( const torch::Tensor inputs_i, // input gate const torch::Tensor keys_i, const torch::Tensor values_i, const torch::Tensor betas_i, const torch::Tensor inputs_u, // update const torch::Tensor keys_u, const torch::Tensor values_u, const torch::Tensor betas_u, const torch::Tensor inputs_o, // output gate const torch::Tensor keys_o, const torch::Tensor values_o, const torch::Tensor betas_o, torch::Tensor states, // init states torch::Tensor cells, // init cell states torch::Tensor kv_i, // might be non zero if carried over from previous seg torch::Tensor kv_u, torch::Tensor kv_o, torch::Tensor outputs, torch::Tensor nmz_delay, // softmax output delayed torch::Tensor cell_outs, torch::Tensor gate_i, torch::Tensor update_u, torch::Tensor gate_o, torch::Tensor v_old_i, torch::Tensor v_old_u, torch::Tensor v_old_o ) { // const at::cuda::OptionalCUDAGuard device_guard(device_of(queries)); torch::DeviceGuard _guard(inputs_i.device()); int N = inputs_i.size(0); int H = inputs_i.size(1); int L = inputs_i.size(2); int E = inputs_i.size(3); int M = values_i.size(3); // int threads = 1024; int threads = 512; // avoid edge cases. // Shared mem max size is 48KB int MUL_PER_BLOCK = min(threads, E*M); // make sure that MUL_PER_BLOCK is divisible by M; MUL_PER_BLOCK = int(MUL_PER_BLOCK / M) * M; threads = MUL_PER_BLOCK; const int subblocks_per_seq = ((E*M) + threads -1) / threads; const int E_per_subblock = MUL_PER_BLOCK / M; const int E_block = subblocks_per_seq * E_per_subblock; // int blocks = N*H*blocks_per_sequence; int blocks = N*H; // total number of sequences // 3 fast weight, 2 output/cells, 3 transforms, 3 for v_old, // 1 softmax denominator, +1 to store max for stable softmax. int shared_mem_const = (E_block * 3 + 5 + 3)*M + 1 + 1; // M for value, 2 * E for query and key. 
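    // Per time step the kernel buffers 3 feed-forward inputs and 3 values
    // (6*M floats), 3 keys (3*E_block floats) and 3 betas (+3), which is what
    // the formula below accounts for.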
int shared_mem_per_time = 6*M + 3*E_block + 3; // Max shared memory size: // 12 * 1024 * 4 (float) = 49152 (48KB) // for Turing: 65536 (64KB) // for Volta: 98304 (96KB) int maxB; int device_id = 0; // int device_id = inputs_i.device(); // Should to be faster than `cudaGetDeviceProperties` according to: https://developer.nvidia.com/blog/cuda-pro-tip-the-fast-way-to-query-device-properties/ cudaDeviceGetAttribute(&maxB, cudaDevAttrMaxSharedMemoryPerBlockOptin, device_id); // std::cout << "Max shared mem: " << maxB << std::endl; int maxF = maxB / sizeof(float); // Following is needed for sm > 48KB cudaFuncSetAttribute(fast_lstm_v4_forward_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, maxB); // cudaDeviceProp prop; // cudaGetDeviceProperties(&prop, 0); assert(maxF - shared_mem_const > 0 && "`d_head` too large. To obtain large models, keep `d_head` small" "e.g. 16 and increase the number of heads instead."); // std::cout << "Max shared mem: " << maxF * sizeof(float) << std::endl; // std::cout << "Shared mem const (float): " << // shared_mem_const * sizeof(float) << std::endl; // std::cout << "Remainder: " << maxF - shared_mem_const << std::endl; // std::cout << "Shared per time: " << shared_mem_per_time << std::endl; const int T = int((maxF - shared_mem_const) / shared_mem_per_time); const int shared_mem_forward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float); // std::cout << "Total used shared mem: " << shared_mem_forward << std::endl; for (int l_offset=0; l_offset < L; l_offset += T) { fast_lstm_v4_forward_kernel <<<blocks, MUL_PER_BLOCK, shared_mem_forward>>>( inputs_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), inputs_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), inputs_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), states.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), cells.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), kv_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), kv_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), kv_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), nmz_delay.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), cell_outs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), update_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M, E_per_subblock, subblocks_per_seq, T, l_offset ); } } // Backward kernel, output gate __global__ void fast_lstm_v4_backward_kernel( const float_accessor keys_i, const float_accessor values_i, const float_accessor betas_i, const float_accessor 
keys_u, const float_accessor values_u, const float_accessor betas_u, const float_accessor keys_o, const float_accessor values_o, const float_accessor betas_o, const float_accessor v_out_i, const float_accessor v_out_u, const float_accessor v_out_o, const float_accessor rnn_out, const float_accessor rnn_out_delayed, const float_accessor cell_out, const float_accessor u_minus_c, const float_accessor grad_out, const float_accessor gate_i, const float_accessor update_u, const float_accessor gate_o, float_accessor grad_h, // output tmp grad float_accessor grad_c, // cell tmp grad float_accessor kv_i, // kv memory from the forward pass float_accessor kv_u, float_accessor kv_o, float_accessor grad_kv_i, // kv temporal grad float_accessor grad_kv_u, float_accessor grad_kv_o, float_accessor grad_inputs_i, // input gate float_accessor grad_keys_i, float_accessor grad_values_i, float_accessor grad_betas_i, float_accessor grad_inputs_u, // update float_accessor grad_keys_u, float_accessor grad_values_u, float_accessor grad_betas_u, float_accessor grad_inputs_o, // output gate float_accessor grad_keys_o, float_accessor grad_values_o, float_accessor grad_betas_o, int N, int H, int L, int E, int M, int E_per_subblock, int subblocks_per_seq, int T, int l_offset ) { // Each block takes care of one sequence. // blockIdx.x = n * H + h int n = blockIdx.x / H; int h = blockIdx.x % H; // threadIdx.x = e_local*M + m // Local e coordinate within E_per_subblock sub-block. int e_local = threadIdx.x / M; int m = threadIdx.x % M; const int E_block = subblocks_per_seq * E_per_subblock; // Load the shared memory for KV const int shared_kv_size = E_block * M; extern __shared__ float shared_mem[]; float* shared_kv_i = shared_mem; float* shared_grad_kv_i = shared_mem + shared_kv_size; float* shared_kv_u = shared_grad_kv_i + shared_kv_size; float* shared_grad_kv_u = shared_kv_u + shared_kv_size; float* shared_kv_o = shared_grad_kv_u + shared_kv_size; float* shared_grad_kv_o = shared_kv_o + shared_kv_size; float* shared_res_zi = shared_grad_kv_o + shared_kv_size; float* shared_res_zu = shared_res_zi + M; float* shared_res_zo = shared_res_zu + M; float* shared_res_k_i = shared_res_zo + M; float* shared_res_k_u = shared_res_k_i + M; float* shared_res_k_o = shared_res_k_u + M; float* shared_res_v_i = shared_res_k_o + M; float* shared_res_v_u = shared_res_v_i + M; float* shared_res_v_o = shared_res_v_u + M; float* shared_grad_v_old_i = shared_res_v_o + M; float* shared_grad_v_old_u = shared_grad_v_old_i + M; float* shared_grad_v_old_o = shared_grad_v_old_u + M; float* shared_res_beta_i = shared_grad_v_old_o + M; float* shared_res_beta_u = shared_res_beta_i + 1; float* shared_res_beta_o = shared_res_beta_u + 1; float* grad_sft_cst = shared_res_beta_o + 1; float* shared_gradout = grad_sft_cst + 1; float* shared_keys_i = shared_gradout + M*T; float* shared_values_i = shared_keys_i + E_block*T; float* shared_keys_u = shared_values_i + M*T; float* shared_values_u = shared_keys_u + E_block*T; float* shared_keys_o = shared_values_u + M*T; float* shared_values_o = shared_keys_o + E_block*T; float* shared_rnn_out = shared_values_o + M*T; float* shared_rnn_out_delayed = shared_rnn_out + M*T; float* shared_c = shared_rnn_out_delayed + M*T; float* shared_u_m_c = shared_c + M*T; float* shared_gate_i = shared_u_m_c + M*T; float* shared_update = shared_gate_i + M*T; float* shared_gate_o = shared_update + M*T; float* shared_v_old_i = shared_gate_o + M*T; float* shared_v_old_u = shared_v_old_i + M*T; float* shared_v_old_o = shared_v_old_u + 
M*T; float* shared_betas_i = shared_v_old_o + M*T; float* shared_betas_u = shared_betas_i + T; float* shared_betas_o = shared_betas_u + T; float* shared_grad_h = shared_betas_o + T; float* shared_grad_c = shared_grad_h + M*T; if (threadIdx.x < M) { shared_res_zi[m] = 0.f; shared_res_zu[m] = 0.f; shared_res_zo[m] = 0.f; shared_res_k_i[m] = 0.f; shared_res_k_u[m] = 0.f; shared_res_k_o[m] = 0.f; shared_res_v_i[m] = 0.f; shared_res_v_u[m] = 0.f; shared_res_v_o[m] = 0.f; shared_grad_v_old_i[m] = 0.f; shared_grad_v_old_u[m] = 0.f; shared_grad_v_old_o[m] = 0.f; } if (threadIdx.x < 1) { shared_res_beta_i[0] = 0.f; shared_res_beta_u[0] = 0.f; shared_res_beta_o[0] = 0.f; grad_sft_cst[0] = 0.f; // offset for grad softmax } // Everythig goes backward int t_end = (T + l_offset) <= L ? T : L - l_offset; for (int i = threadIdx.x; i < (t_end*M); i += blockDim.x) { int t = int(i / M) + l_offset; int t_bw = L - 1 - t; int d = i % M; shared_gradout[i] = grad_out[n][h][t_bw][d]; shared_rnn_out[i] = rnn_out[n][h][t_bw][d]; shared_c[i] = cell_out[n][h][t_bw][d]; shared_u_m_c[i] = u_minus_c[n][h][t_bw][d]; shared_values_i[i] = values_i[n][h][t_bw][d]; shared_values_u[i] = values_u[n][h][t_bw][d]; shared_values_o[i] = values_o[n][h][t_bw][d]; shared_v_old_i[i] = v_out_i[n][h][t_bw][d]; shared_v_old_u[i] = v_out_u[n][h][t_bw][d]; shared_v_old_o[i] = v_out_o[n][h][t_bw][d]; shared_gate_i[i] = gate_i[n][h][t_bw][d]; shared_update[i] = update_u[n][h][t_bw][d]; shared_gate_o[i] = gate_o[n][h][t_bw][d]; } for (int i = threadIdx.x; i < (t_end*E_block); i += blockDim.x) { int t = int(i / E_block) + l_offset; int t_bw = L - 1 - t; int d = (i % E_block); if (d < E) { shared_rnn_out_delayed[i] = rnn_out_delayed[n][h][t_bw][d]; shared_keys_i[i] = keys_i[n][h][t_bw][d]; shared_keys_u[i] = keys_u[n][h][t_bw][d]; shared_keys_o[i] = keys_o[n][h][t_bw][d]; } } for (int i = threadIdx.x; i < t_end; i += blockDim.x) { int t = i + l_offset; int t_bw = L - 1 - t; shared_betas_i[i] = betas_i[n][h][t_bw][0]; shared_betas_u[i] = betas_u[n][h][t_bw][0]; shared_betas_o[i] = betas_o[n][h][t_bw][0]; } __syncthreads(); if (n >= N) { return; } int e; int e_abs; // absolute idx from t=0 int kv_idx; for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { shared_kv_i[kv_idx] = kv_i[n][h][e][m]; shared_grad_kv_i[kv_idx] = grad_kv_i[n][h][e][m]; shared_kv_u[kv_idx] = kv_u[n][h][e][m]; shared_grad_kv_u[kv_idx] = grad_kv_u[n][h][e][m]; shared_kv_o[kv_idx] = kv_o[n][h][e][m]; shared_grad_kv_o[kv_idx] = grad_kv_o[n][h][e][m]; } } if (threadIdx.x < M) { // threadIdx.x = m if threadIdx.x < M shared_grad_h[m] = grad_h[n][h][0][m]; shared_grad_c[m] = grad_c[n][h][0][m]; } for (int t=0; t<t_end; t++) { int l = t + l_offset; int l_b = L - l -1; int m_abs = t*M + m; if (threadIdx.x < M) { // element-wise ops only here // threadIdx.x = m if threadIdx.x < M shared_grad_h[m] += shared_gradout[m_abs]; // float grad_soft_input = // shared_rnn_out[m_abs] * (shared_grad_h[m] - grad_sft_cst[0]); // for output gate float grad_o = shared_c[m_abs] * shared_grad_h[m]; shared_res_zo[m] = grad_o * (1.f - shared_gate_o[m_abs]) * shared_gate_o[m_abs]; // grad c, no sigmoid shared_grad_c[m] += shared_gate_o[m_abs] * shared_grad_h[m]; // shared_grad_c[m] += shared_gate_o[m_abs] * shared_grad_h[m] // * sgmf(shared_c[m_abs]) * (1.f - sgmf(shared_c[m_abs])); shared_grad_h[m] = 0.f; // prepare grad h for the next step. 
} __syncthreads(); // important to sync float v_diff_i = shared_values_i[m_abs] - shared_v_old_i[m_abs]; float v_ins_i = v_diff_i * shared_betas_i[t]; float v_diff_u = shared_values_u[m_abs] - shared_v_old_u[m_abs]; float v_ins_u = v_diff_u * shared_betas_u[t]; float v_diff_o = shared_values_o[m_abs] - shared_v_old_o[m_abs]; float v_ins_o = v_diff_o * shared_betas_o[t]; // Output gate for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // grad rec weight shared_grad_kv_o[kv_idx] += shared_res_zo[m] * shared_rnn_out_delayed[e_abs]; // grad v float res_v_o = shared_grad_kv_o[kv_idx] * shared_keys_o[e_abs] * shared_betas_o[t]; atomicAdd( &shared_res_v_o[m], res_v_o ); // grad k part 1 and 2 float res_k_o = shared_grad_kv_o[kv_idx] * v_ins_o; atomicAdd( &shared_res_k_o[e], res_k_o ); // grad beta float res_b_o = shared_grad_kv_o[kv_idx] * shared_keys_o[e_abs] * v_diff_o; atomicAdd( &shared_res_beta_o[0], res_b_o ); // pass grad for the next time step. float res_h_o = shared_res_zo[m] * shared_kv_o[kv_idx]; atomicAdd( &shared_grad_h[e], res_h_o ); // contribution from output gate } } __syncthreads(); if (threadIdx.x < M) { // grad input gate float grad_i = shared_grad_c[m] * shared_u_m_c[m_abs]; shared_res_zi[m] = grad_i * (1.f - shared_gate_i[m_abs]) * shared_gate_i[m_abs]; // grad update shared_res_zu[m] = shared_grad_c[m] * shared_gate_i[m_abs]; // prepare grad c for the next time step shared_grad_c[m] = shared_grad_c[m] * (1.f - shared_gate_i[m_abs]); } __syncthreads(); // important to sync // Grad for input gate and update transformation for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // grad rec weight shared_grad_kv_i[kv_idx] += shared_res_zi[m] * shared_rnn_out_delayed[e_abs]; shared_grad_kv_u[kv_idx] += shared_res_zu[m] * shared_rnn_out_delayed[e_abs]; // grad v float res_v_i = shared_grad_kv_i[kv_idx] * shared_keys_i[e_abs] * shared_betas_i[t]; atomicAdd( &shared_res_v_i[m], res_v_i ); float res_v_u = shared_grad_kv_u[kv_idx] * shared_keys_u[e_abs] * shared_betas_u[t]; atomicAdd( &shared_res_v_u[m], res_v_u ); // grad k float res_k_i = shared_grad_kv_i[kv_idx] * v_ins_i; atomicAdd( &shared_res_k_i[e], res_k_i ); float res_k_u = shared_grad_kv_u[kv_idx] * v_ins_u; atomicAdd( &shared_res_k_u[e], res_k_u ); // grad beta float res_b_i = shared_grad_kv_i[kv_idx] * shared_keys_i[e_abs] * v_diff_i; atomicAdd( &shared_res_beta_i[0], res_b_i ); float res_b_u = shared_grad_kv_u[kv_idx] * shared_keys_u[e_abs] * v_diff_u; atomicAdd( &shared_res_beta_u[0], res_b_u ); // pass gradients to the next time step float res_h_i = shared_res_zi[m] * shared_kv_i[kv_idx]; atomicAdd( &shared_grad_h[e], res_h_i ); // contribution from input gate float res_h_u = shared_res_zu[m] * shared_kv_u[kv_idx]; atomicAdd( &shared_grad_h[e], res_h_u ); // contribution from update transformation } } __syncthreads(); // compute constant for grad softmax if (threadIdx.x < M) { float cst = shared_grad_h[m] * shared_rnn_out_delayed[m_abs]; atomicAdd( &grad_sft_cst[0], cst ); } __syncthreads(); if (threadIdx.x < M) { shared_grad_h[m] = shared_rnn_out_delayed[m_abs] * (shared_grad_h[m] - grad_sft_cst[0]); } if (threadIdx.x < 1) { grad_sft_cst[0] = 0.f; } for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) 
{ // reverse update fast weight shared_kv_i[kv_idx] -= shared_keys_i[e_abs] * v_ins_i; shared_kv_u[kv_idx] -= shared_keys_u[e_abs] * v_ins_u; shared_kv_o[kv_idx] -= shared_keys_o[e_abs] * v_ins_o; // grad v_old float res_v_old_i = - (shared_grad_kv_i[kv_idx] * shared_betas_i[t] * shared_keys_i[e_abs]); atomicAdd( &shared_grad_v_old_i[m], res_v_old_i ); float res_v_old_u = - (shared_grad_kv_u[kv_idx] * shared_betas_u[t] * shared_keys_u[e_abs]); atomicAdd( &shared_grad_v_old_u[m], res_v_old_u ); float res_v_old_o = - (shared_grad_kv_o[kv_idx] * shared_betas_o[t] * shared_keys_o[e_abs]); atomicAdd( &shared_grad_v_old_o[m], res_v_old_o ); } } __syncthreads(); // remaining key grad for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; e_abs = t*E_block + e; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { // Input gate float res_kp3_i = shared_grad_v_old_i[m] * shared_kv_i[kv_idx]; atomicAdd( &shared_res_k_i[e], res_kp3_i ); // remaining key grad // grad kv via v old shared_grad_kv_i[kv_idx] += shared_grad_v_old_i[m] * shared_keys_i[e_abs]; // Update transform float res_kp3_u = shared_grad_v_old_u[m] * shared_kv_u[kv_idx]; atomicAdd( &shared_res_k_u[e], res_kp3_u ); // remaining key grad // grad kv via v old shared_grad_kv_u[kv_idx] += shared_grad_v_old_u[m] * shared_keys_u[e_abs]; // Output gate float res_kp3_o = shared_grad_v_old_o[m] * shared_kv_o[kv_idx]; atomicAdd( &shared_res_k_o[e], res_kp3_o ); // remaining key grad // grad kv via v old shared_grad_kv_o[kv_idx] += shared_grad_v_old_o[m] * shared_keys_o[e_abs]; } } __syncthreads(); if (threadIdx.x < M) { // m = threadIdx.x if threadIdx.x < M // feed-forward part float rzi = shared_res_zi[m]; atomicAdd( &grad_inputs_i[n][h][l_b][m], rzi ); float rzu = shared_res_zu[m]; atomicAdd( &grad_inputs_u[n][h][l_b][m], rzu ); float rzo = shared_res_zo[m]; atomicAdd( &grad_inputs_o[n][h][l_b][m], rzo ); // keys float rki = shared_res_k_i[m]; atomicAdd( &grad_keys_i[n][h][l_b][m], rki ); float rku = shared_res_k_u[m]; atomicAdd( &grad_keys_u[n][h][l_b][m], rku ); float rko = shared_res_k_o[m]; atomicAdd( &grad_keys_o[n][h][l_b][m], rko ); // values float rvi = shared_res_v_i[m]; atomicAdd( &grad_values_i[n][h][l_b][m], rvi ); float rvu = shared_res_v_u[m]; atomicAdd( &grad_values_u[n][h][l_b][m], rvu ); float rvo = shared_res_v_o[m]; atomicAdd( &grad_values_o[n][h][l_b][m], rvo ); // reset shared_res_k_i[m] = 0.f; shared_res_k_u[m] = 0.f; shared_res_k_o[m] = 0.f; shared_res_v_i[m] = 0.f; shared_res_v_u[m] = 0.f; shared_res_v_o[m] = 0.f; shared_grad_v_old_i[m] = 0.f; shared_grad_v_old_u[m] = 0.f; shared_grad_v_old_o[m] = 0.f; } __syncthreads(); if (threadIdx.x < 1) { // input atomicAdd( &grad_betas_i[n][h][l_b][0], shared_res_beta_i[0] ); shared_res_beta_i[0] = 0.f; // update atomicAdd( &grad_betas_u[n][h][l_b][0], shared_res_beta_u[0] ); shared_res_beta_u[0] = 0.f; // output gate atomicAdd( &grad_betas_o[n][h][l_b][0], shared_res_beta_o[0] ); shared_res_beta_o[0] = 0.f; } __syncthreads(); } __syncthreads(); // write back temporal gradients. 
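    // The reverse-updated fast weights and their running gradients are flushed
    // to global memory so the next (earlier-in-time) chunk of the backward
    // loop can continue from them, mirroring the forward pass's carry-over.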
for (int sub=0; sub<subblocks_per_seq; sub++) { e = sub * E_per_subblock + e_local; kv_idx = threadIdx.x + sub * blockDim.x; if (e < E) { kv_i[n][h][e][m] = shared_kv_i[kv_idx]; grad_kv_i[n][h][e][m] = shared_grad_kv_i[kv_idx]; kv_u[n][h][e][m] = shared_kv_u[kv_idx]; grad_kv_u[n][h][e][m] = shared_grad_kv_u[kv_idx]; kv_o[n][h][e][m] = shared_kv_o[kv_idx]; grad_kv_o[n][h][e][m] = shared_grad_kv_o[kv_idx]; } } if (threadIdx.x < M) { // threadIdx.x = m if threadIdx.x < M grad_h[n][h][0][m] = shared_grad_h[m]; grad_c[n][h][0][m] = shared_grad_c[m]; } } // Backward pass // This is very shared_mem intensive for the standard LSTM... void fast_lstm_v4_backward( const torch::Tensor grad_out, const torch::Tensor keys_i, const torch::Tensor values_i, const torch::Tensor betas_i, const torch::Tensor keys_u, const torch::Tensor values_u, const torch::Tensor betas_u, const torch::Tensor keys_o, const torch::Tensor values_o, const torch::Tensor betas_o, const torch::Tensor v_old_i, const torch::Tensor v_old_u, const torch::Tensor v_old_o, const torch::Tensor outputs, const torch::Tensor o_delayed, const torch::Tensor cell_out, const torch::Tensor u_minus_c, const torch::Tensor gate_i, const torch::Tensor update_u, const torch::Tensor gate_o, torch::Tensor fw_mem_i, // from the forward pass. torch::Tensor fw_mem_u, torch::Tensor fw_mem_o, torch::Tensor grad_in_i, // input gate torch::Tensor grad_ki, torch::Tensor grad_vi, torch::Tensor grad_bi, torch::Tensor grad_in_u, // update torch::Tensor grad_ku, torch::Tensor grad_vu, torch::Tensor grad_bu, torch::Tensor grad_in_o, // output gate torch::Tensor grad_ko, torch::Tensor grad_vo, torch::Tensor grad_bo ) { // const at::cuda::OptionalCUDAGuard device_guard(device_of(grad_queries)); torch::DeviceGuard _guard(grad_out.device()); int N = keys_i.size(0); int H = keys_i.size(1); int L = keys_i.size(2); int E = keys_i.size(3); int M = values_i.size(3); auto grad_kv_i = torch::zeros({N, H, E, M}, keys_i.options()); auto grad_kv_u = torch::zeros({N, H, E, M}, keys_i.options()); auto grad_kv_o = torch::zeros({N, H, E, M}, keys_i.options()); auto grad_h = torch::zeros({N, H, 1, M}, keys_i.options()); auto grad_c = torch::zeros({N, H, 1, M}, keys_i.options()); // const int threads = 1024; const int threads = 512; // avoid edge cases. // Gradient output gate ==================================== int MPB = min(threads, E*M); // make sure that MUL_PER_BLOCK is divisible by M; MPB = int(MPB / M) * M; const int subblocks_per_seq_value = ((E*M) + MPB - 1)/ MPB; const int E_per_subblock = MPB / M; const int blocks_value = N*H; const int E_block = E_per_subblock * subblocks_per_seq_value; // see kernel int shared_mem_const = (6 * E_block + 9 + 3)*M + 4; int shared_mem_per_time = (12 + 3) * M + 3 * E_block + 3; // Max shared memory size: // 12 * 1024 * 4 (float) = 49152 (48KB) // for Turing: 65536 (64KB) // for Volta: 98304 (96KB) int maxB; int device_id = 0; // assume all devices to be the same type as device 0. 
// int device_id = keys_i.device(); // Should to be faster than `cudaGetDeviceProperties` according to: https://developer.nvidia.com/blog/cuda-pro-tip-the-fast-way-to-query-device-properties/ cudaDeviceGetAttribute(&maxB, cudaDevAttrMaxSharedMemoryPerBlockOptin, device_id); // std::cout << "Max shared mem: " << maxB << std::endl; int maxF = maxB / sizeof(float); // Following is needed for sm > 48KB cudaFuncSetAttribute(fast_lstm_v4_backward_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, maxB); // cudaDeviceProp prop; // cudaGetDeviceProperties(&prop, 0); assert(maxF - shared_mem_const > 0 && "`d_head` too large. To obtain large models, keep `d_head` small" "e.g. 16 and increase the number of heads instead."); // std::cout << "Max shared mem: " << maxF * sizeof(float) << std::endl; // std::cout << "Shared mem const (float): " << // shared_mem_const * sizeof(float) << std::endl; // std::cout << "Remainder: " << maxF - shared_mem_const << std::endl; // std::cout << "Shared per time: " << shared_mem_per_time << std::endl; const int T = int((maxF - shared_mem_const) / shared_mem_per_time); const int shared_mem_backward = ((T*shared_mem_per_time) + shared_mem_const) * sizeof(float); for (int l_offset=0; l_offset < L; l_offset += T) { fast_lstm_v4_backward_kernel <<<blocks_value, MPB, shared_mem_backward>>>( keys_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), keys_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), betas_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), v_old_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), outputs.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), o_delayed.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), cell_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), u_minus_c.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), update_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), gate_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_h.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_c.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), fw_mem_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), fw_mem_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), fw_mem_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_kv_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_kv_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_kv_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_in_i.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_ki.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_vi.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_bi.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_in_u.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_ku.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), 
grad_vu.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_bu.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_in_o.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_ko.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_vo.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_bo.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), N, H, L, E, M, E_per_subblock, subblocks_per_seq_value, T, l_offset ); } } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "fast_lstm_v4_forward", &fast_lstm_v4_forward, "Compute the weighted sum of values but attending only to previous " "values." ); m.def( "fast_lstm_v4_backward", &fast_lstm_v4_backward, "Compute the gradients for the fast weight memory." ); }
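// ---------------------------------------------------------------------------
// Reference sketch (not part of the extension): a minimal CPU version of one
// time step of the fast-weight LSTM cell that fast_lstm_v4_forward_kernel
// parallelizes, useful for checking the math. It assumes a single
// (batch, head) pair and E == M; the names Gate, gate_step, fw_lstm_step and
// the toy values in main() are illustrative only.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct Gate {
  std::vector<float> W;  // E x M fast-weight matrix, row-major
  std::vector<float> k;  // key, size E
  std::vector<float> v;  // value to insert, size M
  float beta;            // insertion strength in [0, 1]
  std::vector<float> x;  // feed-forward pre-activation, size M
};

static float sigmoid_ref(float a) { return 1.f / (1.f + std::exp(-a)); }

// Delta-rule update W += k * (beta * (v - W^T k))^T followed by the recurrent
// read z = W^T q, i.e. the per-gate work done inside the kernel.
static std::vector<float> gate_step(Gate& g, const std::vector<float>& q,
                                    int E, int M) {
  std::vector<float> v_old(M, 0.f), z(M, 0.f);
  for (int e = 0; e < E; ++e)
    for (int m = 0; m < M; ++m) v_old[m] += g.W[e * M + m] * g.k[e];
  for (int e = 0; e < E; ++e)
    for (int m = 0; m < M; ++m)
      g.W[e * M + m] += g.k[e] * g.beta * (g.v[m] - v_old[m]);
  for (int e = 0; e < E; ++e)
    for (int m = 0; m < M; ++m) z[m] += q[e] * g.W[e * M + m];
  return z;
}

// One cell step: q = softmax(h_prev), coupled input/forget gate, and the "v4"
// update candidate (identity activation plus its feed-forward term).
static void fw_lstm_step(Gate& gi, Gate& gu, Gate& go, std::vector<float>& h,
                         std::vector<float>& c, int E, int M) {
  std::vector<float> q(E, 0.f);
  float mx = h[0], den = 0.f;
  for (int e = 1; e < E; ++e) mx = std::max(mx, h[e]);
  for (int e = 0; e < E; ++e) { q[e] = std::exp(h[e] - mx); den += q[e]; }
  for (int e = 0; e < E; ++e) q[e] /= (den + 1e-6f);
  std::vector<float> zi = gate_step(gi, q, E, M);
  std::vector<float> zu = gate_step(gu, q, E, M);
  std::vector<float> zo = gate_step(go, q, E, M);
  for (int m = 0; m < M; ++m) {
    float i_t = sigmoid_ref(zi[m] + gi.x[m]);
    float u_t = zu[m] + gu.x[m];
    float o_t = sigmoid_ref(zo[m] + go.x[m]);
    c[m] = i_t * u_t + (1.f - i_t) * c[m];
    h[m] = o_t * c[m];
  }
}

int main() {
  const int E = 4, M = 4;  // recurrent query dim equals value dim here
  Gate gi{std::vector<float>(E * M, 0.f), {1.f, 0.f, 0.f, 0.f},
          {0.5f, 0.1f, 0.2f, 0.3f}, 0.5f, {0.1f, 0.1f, 0.1f, 0.1f}};
  Gate gu = gi, go = gi;
  std::vector<float> h(M, 0.f), c(M, 0.f);
  fw_lstm_step(gi, gu, go, h, c, E, M);
  for (int m = 0; m < M; ++m)
    std::printf("h[%d]=%.4f c[%d]=%.4f\n", m, h[m], m, c[m]);
  return 0;
}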
c0cbb15841986bda2501c40a6d72e69b58ee8c6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_output( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oCol < output.size(3)) { accscalar_t sum = 0.0; int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, input.size(1) + padT); int hend = min(hstart + kH, input.size(2) + padH); int wend = min(wstart + kW, input.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, input.size(1)); hend = min(hend, input.size(2)); wend = min(wend, input.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } int ti, hi, wi; for (ti = tstart; ti < tend; ++ti) { for (hi = hstart; hi < hend; ++hi) { for (wi = wstart; wi < wend; ++wi) { scalar_t val = input[slice][ti][hi][wi]; sum += val; } } } output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor); } } // Inner-most loop size (kW) passed as template parameter for // performance reasons. 
// template<int KERNEL_WIDTH, typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_output( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int kT, int kH, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oCol < output.size(3)) { accscalar_t sum = 0.0; int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, input.size(1) + padT); int hend = min(hstart + kH, input.size(2) + padH); int wend = min(wstart + KERNEL_WIDTH, input.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, input.size(1)); hend = min(hend, input.size(2)); wend = min(wend, input.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } int ti, hi, wi; for (ti = tstart; ti < tend; ++ti) { for (hi = hstart; hi < hend; ++hi) { for (wi = wstart; wi < wend; ++wi) { scalar_t val = input[slice][ti][hi][wi]; sum += val; } } } output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_single_backward_out_frame_stride1( PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<scalar_t, 4> gradInput, int kT, int kH, int kW, accscalar_t normFactor, int offsetZ) { int iCol = blockIdx.x * blockDim.x + threadIdx.x; int iRow = blockIdx.y * blockDim.y + threadIdx.y; int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // input frame/time int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // input slice/feature // guard against over-tiled threads if (iRow < gradInput.size(2) && iCol < gradInput.size(3)) { accscalar_t sum = 0.0; scalar_t *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)] [max(0, iRow - kH + 1)][max(0, iCol - kW + 1)]; int frameOffset = 0; for (int oFrame = max(0, iFrame - kT + 1); oFrame < min(iFrame + 1, gradOutput.size(1)); ++oFrame) { int rowOffset = frameOffset; for (int oRow = max(0, iRow - kH + 1); oRow < min(iRow + 1, gradOutput.size(2)); ++oRow) { int colOffset = rowOffset; for (int oCol = max(0, iCol - kW + 1); oCol < min(iCol + 1, gradOutput.size(3)); ++oCol) { sum += gOut[colOffset]; ++colOffset; } rowOffset += gradOutput.size(3); } frameOffset += gradOutput.size(2) * gradOutput.size(3); } gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum * normFactor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_grad_input_atomic( PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<scalar_t, 4> gradInput, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; 
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature // guard against over-tiled threads if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3)) { int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, gradInput.size(1) + padT); int hend = min(hstart + kH, gradInput.size(2) + padH); int wend = min(wstart + kW, gradInput.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, gradInput.size(1)); hend = min(hend, gradInput.size(2)); wend = min(wend, gradInput.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to( ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor); for (int iFrame = tstart; iFrame < tend; ++iFrame) { for (int iRow = hstart; iRow < hend; ++iRow) { for (int iCol = wstart; iCol < wend; ++iCol) { atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val); } } } } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_grad_input( PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<scalar_t, 4> gradInput, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature // guard against over-tiled threads if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3)) { int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, gradInput.size(1) + padT); int hend = min(hstart + kH, gradInput.size(2) + padH); int wend = min(wstart + kW, gradInput.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, gradInput.size(1)); hend = min(hend, gradInput.size(2)); wend = min(wend, gradInput.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to( ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor); for (int iFrame = tstart; iFrame < tend; ++iFrame) { for (int iRow = hstart; iRow < hend; ++iRow) { for (int iCol = wstart; iCol < wend; ++iCol) { gradInput[slice][iFrame][iRow][iCol] = val; } } } } } #define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ hipLaunchKernelGGL(( avg_pool3d_cuda_update_output<KW, scalar_t, accscalar_t>) \ , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ 
work_input.packed_accessor64<scalar_t, 4>(), \ work_output.packed_accessor64<scalar_t, 4>(), \ kT, kH, \ dT, dH, dW, \ padT, padH, padW, \ count_include_pad, \ offsetZ, divisor); \ break void avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input, "input", 2 }; checkAllSameGPU("avg_pool3d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "avg_pool3d: kernel_size must be a single int, or a tuple of three ints"); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3, "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints"); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "avg_pool3d: padding must be a single int, or a tuple of three ints"); const int padT = safe_downcast<int, int64_t>(padding[0]); const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]); const int padW = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); // if divisor==0 then we will ignore it int64_t divisor = 0; if (divisor_override.has_value()) { TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero"); divisor = divisor_override.value(); } const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, padT, padH, padW, 1, 1, 1, itime, iheight, iwidth, otime, oheight, owidth, /*check_input_size=*/ true); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); } Tensor work_input = input.contiguous(); Tensor work_output = output; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
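  // Folding batch into the feature dimension lets the launch loop below walk
  // (slice, frame) pairs through gridDim.z, chunked in groups of 65535 to
  // stay within the grid z-dimension limit.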
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "avg_pool3d_out_cuda", [&] { using accscalar_t = acc_type<scalar_t, true>; int64_t totalZ = otime * nslices * nbatch; int64_t offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7); default: hipLaunchKernelGGL(( avg_pool3d_cuda_update_output<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), work_input.packed_accessor64<scalar_t, 4>(), work_output.packed_accessor64<scalar_t, 4>(), kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ, divisor); break; } TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool3d_out_cuda failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } ); } #undef LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH void avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("avg_pool3d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "avg_pool3d: kernel_size must be a single int, or a tuple of three ints"); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3, "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints"); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "avg_pool3d: padding must be a single int, or a tuple of three ints"); const int padT = safe_downcast<int, int64_t>(padding[0]); const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]); const int padW = padding.size() == 1 ? 
padT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for gradOutput"); // if divisor==0 then we will ignore it int64_t divisor = 0; if (divisor_override.has_value()) { TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero"); divisor = divisor_override.value(); } // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); /* XXX shape check behavior from TH */ const int64_t otime_for_shape_check = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode); const int64_t oheight_for_shape_check = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode); const int64_t owidth_for_chape_check = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode); const bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW); avg_pool3d_backward_shape_check( input, gradOutput, nslices, kT, kH, kW, dT, dH, dW, padT, padH, padW, itime, iheight, iwidth, otime, oheight, owidth); Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); } // Optimizing for stride 1 is probably only of limited value, but this // specialization yields 3x speedup over the atomicAdd implementation. // Padding must be 0, otherwise, pool size may change. if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool3d_backward_out_frame_stride1", [&] { using accscalar_t = acc_type<scalar_t, true>; int64_t totalZ = itime * nslices * nbatch; int64_t offsetZ = 0; dim3 block(32, 8); accscalar_t divide_factor; if (divisor) { divide_factor = static_cast<accscalar_t>(divisor); } else { divide_factor = static_cast<accscalar_t>(kT * kH * kW); } while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(iwidth, static_cast<int64_t>(block.x)), cuda::ATenCeilDiv(iheight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( avg_pool3d_single_backward_out_frame_stride1<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), work_grad_output.packed_accessor64<scalar_t, 4>(), work_grad_input.packed_accessor64<scalar_t, 4>(), kT, kH, kW, 1.0f/divide_factor, offsetZ); TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool3d_backward_out_frame failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool3d_backward_out_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; int64_t totalZ = otime * nslices * nbatch; int64_t offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 65535 : totalZ); if (kernelsOverlap) { hipLaunchKernelGGL(( avg_pool3d_cuda_update_grad_input_atomic<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), work_grad_output.packed_accessor64<scalar_t, 4>(), work_grad_input.packed_accessor64<scalar_t, 4>(), kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ, divisor); } else { hipLaunchKernelGGL(( avg_pool3d_cuda_update_grad_input<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), work_grad_output.packed_accessor64<scalar_t, 4>(), work_grad_input.packed_accessor64<scalar_t, 4>(), kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ, divisor); } TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool3d_backward_out_frame failed with error code ", hipGetLastError()); totalZ -= 65535; offsetZ += 65535; } } ); } } } // namespace Tensor& avg_pool3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool3d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor avg_pool3d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { Tensor output = at::empty({0}, input.options()); avg_pool3d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor& avg_pool3d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } Tensor avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); avg_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } } // at::native } // at
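// ---------------------------------------------------------------------------
// Reference sketch (not part of ATen): a single-channel CPU version of the
// pooling computed by avg_pool3d_cuda_update_output, mainly to document the
// count_include_pad / divisor_override semantics. Floor output shapes only
// (no ceil_mode); the function name avg_pool3d_ref is illustrative.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <vector>

std::vector<float> avg_pool3d_ref(
    const std::vector<float>& in, int T, int H, int W,
    int kT, int kH, int kW, int dT, int dH, int dW,
    int padT, int padH, int padW,
    bool count_include_pad, int divisor_override /* 0 = unset */) {
  const int oT = (T + 2 * padT - kT) / dT + 1;
  const int oH = (H + 2 * padH - kH) / dH + 1;
  const int oW = (W + 2 * padW - kW) / dW + 1;
  std::vector<float> out(oT * oH * oW, 0.f);
  for (int ot = 0; ot < oT; ++ot)
    for (int oh = 0; oh < oH; ++oh)
      for (int ow = 0; ow < oW; ++ow) {
        int t0 = ot * dT - padT, h0 = oh * dH - padH, w0 = ow * dW - padW;
        int t1 = std::min(t0 + kT, T + padT);
        int h1 = std::min(h0 + kH, H + padH);
        int w1 = std::min(w0 + kW, W + padW);
        // pool_size counts padded cells; the clamped window below does not.
        const int pool_size = (t1 - t0) * (h1 - h0) * (w1 - w0);
        t0 = std::max(t0, 0); h0 = std::max(h0, 0); w0 = std::max(w0, 0);
        t1 = std::min(t1, T); h1 = std::min(h1, H); w1 = std::min(w1, W);
        const float div = divisor_override
            ? static_cast<float>(divisor_override)
            : count_include_pad
                ? static_cast<float>(pool_size)
                : static_cast<float>((t1 - t0) * (h1 - h0) * (w1 - w0));
        float sum = 0.f;
        for (int t = t0; t < t1; ++t)
          for (int h = h0; h < h1; ++h)
            for (int w = w0; w < w1; ++w) sum += in[(t * H + h) * W + w];
        out[(ot * oH + oh) * oW + ow] = sum / div;
      }
  return out;
}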
c0cbb15841986bda2501c40a6d72e69b58ee8c6b.cu
#include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_output( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oCol < output.size(3)) { accscalar_t sum = 0.0; int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, input.size(1) + padT); int hend = min(hstart + kH, input.size(2) + padH); int wend = min(wstart + kW, input.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, input.size(1)); hend = min(hend, input.size(2)); wend = min(wend, input.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } int ti, hi, wi; for (ti = tstart; ti < tend; ++ti) { for (hi = hstart; hi < hend; ++hi) { for (wi = wstart; wi < wend; ++wi) { scalar_t val = input[slice][ti][hi][wi]; sum += val; } } } output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor); } } // Inner-most loop size (kW) passed as template parameter for // performance reasons. 
// template<int KERNEL_WIDTH, typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_output( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int kT, int kH, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature if (oRow < output.size(2) && oCol < output.size(3)) { accscalar_t sum = 0.0; int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, input.size(1) + padT); int hend = min(hstart + kH, input.size(2) + padH); int wend = min(wstart + KERNEL_WIDTH, input.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, input.size(1)); hend = min(hend, input.size(2)); wend = min(wend, input.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } int ti, hi, wi; for (ti = tstart; ti < tend; ++ti) { for (hi = hstart; hi < hend; ++hi) { for (wi = wstart; wi < wend; ++wi) { scalar_t val = input[slice][ti][hi][wi]; sum += val; } } } output[slice][oFrame][oRow][oCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum / divide_factor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_single_backward_out_frame_stride1( PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<scalar_t, 4> gradInput, int kT, int kH, int kW, accscalar_t normFactor, int offsetZ) { int iCol = blockIdx.x * blockDim.x + threadIdx.x; int iRow = blockIdx.y * blockDim.y + threadIdx.y; int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // input frame/time int slice = (blockIdx.z + offsetZ) / gradInput.size(1); // input slice/feature // guard against over-tiled threads if (iRow < gradInput.size(2) && iCol < gradInput.size(3)) { accscalar_t sum = 0.0; scalar_t *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)] [max(0, iRow - kH + 1)][max(0, iCol - kW + 1)]; int frameOffset = 0; for (int oFrame = max(0, iFrame - kT + 1); oFrame < min(iFrame + 1, gradOutput.size(1)); ++oFrame) { int rowOffset = frameOffset; for (int oRow = max(0, iRow - kH + 1); oRow < min(iRow + 1, gradOutput.size(2)); ++oRow) { int colOffset = rowOffset; for (int oCol = max(0, iCol - kW + 1); oCol < min(iCol + 1, gradOutput.size(3)); ++oCol) { sum += gOut[colOffset]; ++colOffset; } rowOffset += gradOutput.size(3); } frameOffset += gradOutput.size(2) * gradOutput.size(3); } gradInput[slice][iFrame][iRow][iCol] = ScalarConvert<accscalar_t, scalar_t>::to(sum * normFactor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_grad_input_atomic( PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<scalar_t, 4> gradInput, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; 
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature // guard against over-tiled threads if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3)) { int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, gradInput.size(1) + padT); int hend = min(hstart + kH, gradInput.size(2) + padH); int wend = min(wstart + kW, gradInput.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, gradInput.size(1)); hend = min(hend, gradInput.size(2)); wend = min(wend, gradInput.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to( ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor); for (int iFrame = tstart; iFrame < tend; ++iFrame) { for (int iRow = hstart; iRow < hend; ++iRow) { for (int iCol = wstart; iCol < wend; ++iCol) { atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val); } } } } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool3d_cuda_update_grad_input( PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<scalar_t, 4> gradInput, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW, bool count_include_pad, int offsetZ, int divisor_override) { int oCol = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // gradOutput frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // gradOutput slice/feature // guard against over-tiled threads if (oRow < gradOutput.size(2) && oCol < gradOutput.size(3)) { int tstart = oFrame * dT - padT; int hstart = oRow * dH - padH; int wstart = oCol * dW - padW; int tend = min(tstart + kT, gradInput.size(1) + padT); int hend = min(hstart + kH, gradInput.size(2) + padH); int wend = min(wstart + kW, gradInput.size(3) + padW); int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); tstart = max(tstart, 0); hstart = max(hstart, 0); wstart = max(wstart, 0); tend = min(tend, gradInput.size(1)); hend = min(hend, gradInput.size(2)); wend = min(wend, gradInput.size(3)); accscalar_t divide_factor; if (divisor_override) { divide_factor = static_cast<accscalar_t>(divisor_override); } else { if(count_include_pad) { divide_factor = static_cast<accscalar_t>(pool_size); } else { divide_factor = static_cast<accscalar_t>((tend - tstart) * (hend - hstart) * (wend - wstart)); } } scalar_t val = ScalarConvert<accscalar_t, scalar_t>::to( ScalarConvert<scalar_t, accscalar_t>::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor); for (int iFrame = tstart; iFrame < tend; ++iFrame) { for (int iRow = hstart; iRow < hend; ++iRow) { for (int iCol = wstart; iCol < wend; ++iCol) { gradInput[slice][iFrame][iRow][iCol] = val; } } } } } #define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ avg_pool3d_cuda_update_output<KW, scalar_t, accscalar_t> \ <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \ work_input.packed_accessor64<scalar_t, 4>(), \ 
work_output.packed_accessor64<scalar_t, 4>(), \ kT, kH, \ dT, dH, dW, \ padT, padH, padW, \ count_include_pad, \ offsetZ, divisor); \ break void avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input, "input", 2 }; checkAllSameGPU("avg_pool3d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "avg_pool3d: kernel_size must be a single int, or a tuple of three ints"); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3, "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints"); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "avg_pool3d: padding must be a single int, or a tuple of three ints"); const int padT = safe_downcast<int, int64_t>(padding[0]); const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]); const int padW = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); // if divisor==0 then we will ignore it int64_t divisor = 0; if (divisor_override.has_value()) { TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero"); divisor = divisor_override.value(); } const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, padT, padH, padW, 1, 1, 1, itime, iheight, iwidth, otime, oheight, owidth, /*check_input_size=*/ true); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); } Tensor work_input = input.contiguous(); Tensor work_output = output; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
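    // The kernels take 4-D accessors indexed as [slice][t][h][w] and enumerate
    // slices via blockIdx.z, so batch and channel are folded into a single
    // leading dimension of nbatch * nslices slices.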
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "avg_pool3d_out_cuda", [&] { using accscalar_t = acc_type<scalar_t, true>; int64_t totalZ = otime * nslices * nbatch; int64_t offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 65535 : totalZ); switch (kW) { LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(1); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(2); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(3); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(4); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(5); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(6); LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(7); default: avg_pool3d_cuda_update_output<scalar_t, accscalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( work_input.packed_accessor64<scalar_t, 4>(), work_output.packed_accessor64<scalar_t, 4>(), kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ, divisor); break; } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool3d_out_cuda failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } ); } #undef LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH void avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("avg_pool3d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "avg_pool3d: kernel_size must be a single int, or a tuple of three ints"); const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 3, "avg_pool3d: stride must be omitted, a single int, or a tuple of three ints"); const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "avg_pool3d: padding must be a single int, or a tuple of three ints"); const int padT = safe_downcast<int, int64_t>(padding[0]); const int padH = padding.size() == 1 ? padT : safe_downcast<int, int64_t>(padding[1]); const int padW = padding.size() == 1 ? 
padT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input"); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "non-empty 4D or 5D (batch mode) tensor expected for gradOutput"); // if divisor==0 then we will ignore it int64_t divisor = 0; if (divisor_override.has_value()) { TORCH_CHECK(divisor_override.value() != 0, "divisor must be not zero"); divisor = divisor_override.value(); } // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); /* XXX shape check behavior from TH */ const int64_t otime_for_shape_check = pooling_output_shape<int64_t>(itime, kT, padT, dT, 1, ceil_mode); const int64_t oheight_for_shape_check = pooling_output_shape<int64_t>(iheight, kH, padH, dH, 1, ceil_mode); const int64_t owidth_for_chape_check = pooling_output_shape<int64_t>(iwidth, kW, padW, dW, 1, ceil_mode); const bool kernelsOverlap = (dT < kT) || (dH < kH) || (dW < kW); avg_pool3d_backward_shape_check( input, gradOutput, nslices, kT, kH, kW, dT, dH, dW, padT, padH, padW, itime, iheight, iwidth, otime, oheight, owidth); Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); } // Optimizing for stride 1 is probably only of limited value, but this // specialization yields 3x speedup over the atomicAdd implementation. // Padding must be 0, otherwise, pool size may change. if (dT == 1 && dH == 1 && dW == 1 && padT == 0 && padH == 0 && padW == 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool3d_backward_out_frame_stride1", [&] { using accscalar_t = acc_type<scalar_t, true>; int64_t totalZ = itime * nslices * nbatch; int64_t offsetZ = 0; dim3 block(32, 8); accscalar_t divide_factor; if (divisor) { divide_factor = static_cast<accscalar_t>(divisor); } else { divide_factor = static_cast<accscalar_t>(kT * kH * kW); } while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(iwidth, static_cast<int64_t>(block.x)), cuda::ATenCeilDiv(iheight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 
65535 : totalZ); avg_pool3d_single_backward_out_frame_stride1<scalar_t, accscalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( work_grad_output.packed_accessor64<scalar_t, 4>(), work_grad_input.packed_accessor64<scalar_t, 4>(), kT, kH, kW, 1.0f/divide_factor, offsetZ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool3d_backward_out_frame failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool3d_backward_out_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; int64_t totalZ = otime * nslices * nbatch; int64_t offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int64_t>(block.x)), cuda::ATenCeilDiv(oheight, static_cast<int64_t>(block.y)), totalZ > 65535 ? 65535 : totalZ); if (kernelsOverlap) { avg_pool3d_cuda_update_grad_input_atomic<scalar_t, accscalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( work_grad_output.packed_accessor64<scalar_t, 4>(), work_grad_input.packed_accessor64<scalar_t, 4>(), kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ, divisor); } else { avg_pool3d_cuda_update_grad_input<scalar_t, accscalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( work_grad_output.packed_accessor64<scalar_t, 4>(), work_grad_input.packed_accessor64<scalar_t, 4>(), kT, kH, kW, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ, divisor); } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool3d_backward_out_frame failed with error code ", cudaGetLastError()); totalZ -= 65535; offsetZ += 65535; } } ); } } } // namespace Tensor& avg_pool3d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool3d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor avg_pool3d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { Tensor output = at::empty({0}, input.options()); avg_pool3d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor& avg_pool3d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } Tensor avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); avg_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } } // at::native } // at
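// ---------------------------------------------------------------------------
// Usage sketch (disabled): a minimal host-side call into the public
// at::avg_pool3d entry point, which dispatches to the CUDA kernels above for
// CUDA tensors. The argument order (kernel_size, stride, padding, ceil_mode,
// count_include_pad, divisor_override) mirrors the functions in this file;
// the expected output extents follow floor((in + 2*pad - kernel) / stride) + 1
// when ceil_mode is false.
// ---------------------------------------------------------------------------
#if 0
#include <ATen/ATen.h>
#include <iostream>

int main() {
  // NCDHW input: batch=2, channels=3, 16x32x32 volume, allocated on the GPU.
  at::Tensor input = at::randn({2, 3, 16, 32, 32},
                               at::device(at::kCUDA).dtype(at::kFloat));
  // 2x2x2 kernel, stride 2, no padding -> each output voxel averages 8 inputs.
  at::Tensor out = at::avg_pool3d(input,
                                  /*kernel_size=*/{2, 2, 2},
                                  /*stride=*/{2, 2, 2},
                                  /*padding=*/{0, 0, 0},
                                  /*ceil_mode=*/false,
                                  /*count_include_pad=*/true,
                                  /*divisor_override=*/c10::nullopt);
  std::cout << out.sizes() << std::endl;  // expected: [2, 3, 8, 16, 16]
  return 0;
}
#endif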
09049d3860d6db7ef83e6c6298a992bdc123bf84.hip
// !!! This is a file automatically generated by hipify!!! //The MIT License (MIT) //Copyright (c) 2016 Massachusetts Institute of Technology //Authors: Mike Gowanlock //This software has been created in projects supported by the US National //Science Foundation and NASA (PI: Pankratius, NSF ACI-1442997, NASA AIST-NNX15AG84G) //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: //The above copyright notice and this permission notice shall be included in //all copies or substantial portions of the Software. //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN //THE SOFTWARE. //precompute direct neighbors with the GPU: #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "structs.h" #include <stdio.h> #include "kernel.h" #include <math.h> #include "GPU.h" #include <algorithm> #include "omp.h" #include <queue> //thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/system/hip/execution_policy.h> //for streams for thrust (added with Thrust v1.8) //elements for the result set //FOR A SINGLE KERNEL INVOCATION //NOT FOR THE BATCHED ONE #define BUFFERELEM 300000000 //400000000-original (when removing the data from the device before putting it back for the sort) //FOR THE BATCHED EXECUTION: //#define BATCHTOTALELEM 1200000000 //THE TOTAL SIZE ALLOCATED ON THE HOST //THE NUMBER OF BATCHES AND THE SIZE OF THE BUFFER FOR EACH KERNEL EXECUTION ARE NOT RELATED TO THE TOTAL NUMBER //OF ELEMENTS (ABOVE). 
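//rough memory footprint of the batch buffers defined below: each buffer holds
//BATCHBUFFERELEM ints for the key array plus the same again for the value array,
//i.e. about 2 * 100000000 * 4 B ~= 0.75 GiB of device memory per in-flight batch
//(times GPUSTREAMS if a separate buffer is kept for each stream), which has to
//fit on the card alongside the database, the grid index and the lookup array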
#define NUMBATCHES 20 #define BATCHBUFFERELEM 100000000 //THE SMALLER SIZE ALLOCATED ON THE DEVICE FOR EACH KERNEL EXECUTION #define GPUSTREAMS 3 //number of concurrent gpu streams using namespace std; //Uses the grid index to compute the direct neighbor table //uses shared memory //each grid cell is processed by a block of threads //IN THIS ONE, WE PASS INTO THE GPU THE MAXIMUM AMOUNT OF SHARED MEMORY REQUIRED TO STORE THE OVERLAPPING //DATA ELEMENTS void makeDistanceTableGPUGridIndexWithSMBlockDataAware(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, int * lookupArr, struct table * neighborTable, int * totalNeighbors, unsigned int maxNumSMDataItems) { //CUDA error code: hipError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); //pinned memory for the database: struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*N)); //dont use pinned memory for the database, its slower than using hipMalloc //hipHostMalloc((void **) &database, sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=hipMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != hipSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=hipMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// //test print the lookup array: // for (int i=0; i<*N; i++) // { // printf("\nlookup %d: %d",i, lookupArr[i]); // } int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*N)); //allocate memory on device: errCode=hipMalloc( 
(void**)&dev_lookupArr, sizeof(int)*(*N)); if(errCode != hipSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=hipMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=hipMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough 
memory } errCode=hipMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // hipHostMalloc((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // hipHostMalloc((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //epsilon errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //N (DATASET SIZE) errCode=hipMemcpy( dev_N, N, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //THE NUMBER OF THREADS //The number of threads is the blocksize * the number of grid cells //Therefore, each data item is not assigned to a single thread unsigned int * numgputhreads; numgputhreads=(unsigned int*)malloc(sizeof(unsigned int)); *numgputhreads=totalGridCells*BLOCKSIZE; unsigned int * dev_numThreads; dev_numThreads=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_numThreads, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //Number of threads errCode=hipMemcpy( dev_numThreads, numgputhreads, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //THE AMOUNT OF SHARED MEMORY REQUIRED TO STORE THE OVERLAPPING POINTS OF A GIVEN ORIGIN CELL unsigned int * elemsSM; 
elemsSM=(unsigned int*)malloc(sizeof(unsigned int)); *elemsSM=maxNumSMDataItems; unsigned int * dev_elemsSM; dev_elemsSM=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_elemsSM, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_elemsSM Got error with code " << errCode << endl; } //SHARED MEMORY ELEMENTS errCode=hipMemcpy( dev_elemsSM, elemsSM, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_elemsSM Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=hipMemcpy( dev_debug1, debug1, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_debug2, debug2, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// //the total blocks is the number of grid cells const int TOTALBLOCKS=totalGridCells; printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //The third parameter in the kernel allocation is for dynamic shared memory. 
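	//As a sketch of what the kernel presumably does with that one allocation
	//(the real layout is inside kernelGridIndexSMBlockDataAware):
	//  extern __shared__ char sm[];
	//  double *smX  = (double *)sm;              //x coords of the overlapping points
	//  double *smY  = smX + elemsSM;             //y coords
	//  int    *smId = (int *)(smY + elemsSM);    //their point ids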
//We need shared memory for 3 arrays (2x doubles and 1x int) const int SIZE_SM=(2*(*elemsSM)*sizeof(double))+((*elemsSM)*sizeof(int)); printf("\nMemory requested for DYNAMIC shared memory (kb): %f",SIZE_SM/1024.0); hipLaunchKernelGGL(( kernelGridIndexSMBlockDataAware), dim3(TOTALBLOCKS), dim3(BLOCKSIZE), SIZE_SM , 0, dev_numThreads, dev_N, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt, dev_database, dev_elemsSM, dev_pointIDKey, dev_pointInDistValue); if ( hipSuccess != hipGetLastError() ){ printf( "\n\nERROR IN KERNEL LAUNCH!\nMIGHT BE TOO MUCH DYNAMIC SHARED MEMORY REQUESTED\n\n" ); } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",*cnt); } /* //copy the results, but only transfer the number of results, not the entire buffer errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting results from GPU Got error with code " << errCode << endl; } */ printf("\nIn block GPU method, Count is: %d",*cnt); *totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) errCode=hipMemcpy(debug1, dev_debug1, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=hipMemcpy(debug2, dev_debug2, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: hipFree(dev_N); hipFree(dev_numThreads); hipFree(dev_database); hipFree(dev_debug1); hipFree(dev_debug2); hipFree(dev_cnt); hipFree(dev_epsilon); //hipFree(dev_results); hipFree(dev_grid); hipFree(dev_lookupArr); hipFree(dev_gridNumXCells); hipFree(dev_gridNumYCells); hipFree(dev_gridMin_x); hipFree(dev_gridMin_y); //////////////////////////////////// //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> 
dev_data_ptr(dev_pointInDistValue); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //Uses the grid index to compute the direct neighbor table //uses shared memory //each grid cell is processed by a block of threads (set at compile time) void makeDistanceTableGPUGridIndexWithSMBlockDataOblivious(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, int * numNonEmptyCells, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct table * neighborTable, int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); //CUDA error code: hipError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); //pinned memory for the database: struct point * database; //dont use pinned memory for the database, its slower than using hipMalloc database=(struct point*)malloc(sizeof(struct point)*(*N)); //hipHostMalloc((void **) &database, sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=hipMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != hipSuccess) { cout << 
"\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=hipMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// //test print the lookup array: // for (int i=0; i<*N; i++) // { // printf("\nlookup %d: %d",i, lookupArr[i]); // } int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_lookupArr, sizeof(int)*(*N)); if(errCode != hipSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=hipMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got 
error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=hipMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // hipHostMalloc((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // hipHostMalloc((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //epsilon errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //N (DATASET SIZE) errCode=hipMemcpy( dev_N, N, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //THE NUMBER OF THREADS //The number of threads is the blocksize * the number of non-empty grid cells //Therefore, each data item is not assigned to a single 
thread unsigned int * numgputhreads; numgputhreads=(unsigned int*)malloc(sizeof(unsigned int)); *numgputhreads=(*numNonEmptyCells)*BLOCKSIZE; unsigned int * dev_numThreads; dev_numThreads=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_numThreads, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //Number of threads errCode=hipMemcpy( dev_numThreads, numgputhreads, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=hipMemcpy( dev_debug1, debug1, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_debug2, debug2, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } //////////////////////////////////////////// //the schedule //an array that tells each block what grid id to process //that way we only request the number of blocks that correspond to the number of non-empty cells unsigned int * schedule; schedule=(unsigned int*)malloc(sizeof(unsigned int)*(*numNonEmptyCells)); int nonemptycnt=0; for (int i=0; i<totalGridCells; i++) { if (index[i].indexmin!=-1) { schedule[nonemptycnt]=i; nonemptycnt++; } } unsigned int * dev_schedule; dev_schedule=(unsigned int*)malloc(sizeof(unsigned int)*(*numNonEmptyCells)); //allocate on the device errCode=hipMalloc((void**)&dev_schedule, sizeof(unsigned int)*(*numNonEmptyCells)); if(errCode != hipSuccess) { cout << "\nError: dev_schedule Got error with code " << errCode << endl; } //copy the schedule errCode=hipMemcpy( dev_schedule, schedule, sizeof(unsigned int)*(*numNonEmptyCells), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_schedule Got error with code " << errCode << endl; } //////////////////////////// //END THE SCHEDULE //////////////////////////// /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// //the total blocks is the number of grid cells const int TOTALBLOCKS=(*numNonEmptyCells); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel hipLaunchKernelGGL(( kernelGridIndexSMBlock), dim3(TOTALBLOCKS), dim3(BLOCKSIZE) , 0, 0, dev_numThreads, dev_N, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt, dev_database, dev_schedule, dev_pointIDKey, dev_pointInDistValue); 
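	//inside the kernel each block presumably maps itself to its grid cell via
	//schedule[blockIdx.x], so only the *numNonEmptyCells blocks launched above do
	//any work and the empty cells are never visited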
if ( hipSuccess != hipGetLastError() ){ printf( "Error in kernel launch!\n" ); } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer /* errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting results from GPU Got error with code " << errCode << endl; } */ printf("\nIn block GPU method, Count is: %d",*cnt); *totalNeighbors=(*cnt); double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX errCode=hipMemcpy(debug1, dev_debug1, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=hipMemcpy(debug2, dev_debug2, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: hipFree(dev_N); hipFree(dev_database); hipFree(dev_debug1); hipFree(dev_debug2); hipFree(dev_cnt); hipFree(dev_epsilon); // hipFree(dev_results); hipFree(dev_grid); hipFree(dev_lookupArr); hipFree(dev_gridNumXCells); hipFree(dev_gridNumYCells); hipFree(dev_gridMin_x); hipFree(dev_gridMin_y); hipFree(dev_numThreads); hipFree(dev_schedule); //////////////////////////////////// //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device 
hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //Uses the grid index to compute the direct neighbor table //NO SHARED MEMORY PAGING void makeDistanceTableGPUGridIndex(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct table * neighborTable, int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); //CUDA error code: hipError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); struct point * database; //pinned memory for the database: database=(struct point*)malloc(sizeof(struct point)*(*N)); //dont use pinned memory for the database, its slower than using hipMalloc //hipHostMalloc((void **) &database, sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=hipMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != hipSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=hipMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// 
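	//the lookup array appears to map a position in a grid cell's [indexmin, indexmax]
	//range back to the id of the corresponding point in the database, so the members
	//of a cell can be enumerated without reordering the database itself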
//test print the lookup array: // for (int i=0; i<*N; i++) // { // printf("\nlookup %d: %d",i, lookupArr[i]); // } int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_lookupArr, sizeof(int)*(*N)); if(errCode != hipSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=hipMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //ORIGINAL, TESTING PINNED MEMORY /* struct structresults * dev_results; struct structresults * results; errCode=hipMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; 
//2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); //host result allocation: results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); */ //PINNED MEMORY FOR THE RESULT SET /* struct structresults * dev_results; struct structresults * results; errCode=hipMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); //host result allocation: //results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); //pinned result set memory for the host hipHostMalloc((void **) &results, sizeof(struct structresults)*BUFFERELEM); */ //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=hipMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // hipHostMalloc((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // hipHostMalloc((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? 
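//Illustrative sketch (not part of this function): the commented-out pinned (hipHostMalloc) path above
//and the paged (malloc) allocation that follows differ in that only pinned host memory can be the
//target of an overlapped hipMemcpyAsync on a stream; a paged buffer falls back to an effectively
//synchronous, staged copy, which is why the batched version later in this file pays the pinned
//allocation cost. A minimal, commented-out sketch of the two paths is kept below for reference;
//the exampleBuf* names and the element count are hypothetical, while the HIP calls
//(hipHostMalloc/hipHostFree/hipMemcpyAsync) are the same ones used for the batched buffers.
/*
{
	const size_t exampleElems=1000000; //hypothetical buffer size
	int * exampleBufPinned=NULL;
	int * exampleBufPaged=NULL;
	int * dev_exampleBuf=NULL;
	hipStream_t exampleStream;
	hipStreamCreate(&exampleStream);
	hipMalloc((void **)&dev_exampleBuf, sizeof(int)*exampleElems);
	//pinned allocation: more expensive to allocate, but the copy can be truly asynchronous on a stream
	hipHostMalloc((void **)&exampleBufPinned, sizeof(int)*exampleElems);
	hipMemcpyAsync(exampleBufPinned, dev_exampleBuf, sizeof(int)*exampleElems, hipMemcpyDeviceToHost, exampleStream);
	hipStreamSynchronize(exampleStream);
	//paged allocation: cheap to allocate, but the copy below is effectively synchronous
	exampleBufPaged=(int*)malloc(sizeof(int)*exampleElems);
	hipMemcpy(exampleBufPaged, dev_exampleBuf, sizeof(int)*exampleElems, hipMemcpyDeviceToHost);
	hipHostFree(exampleBufPinned);
	free(exampleBufPaged);
	hipFree(dev_exampleBuf);
	hipStreamDestroy(exampleStream);
}
*/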
pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //copy to device errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //copy N to device errCode=hipMemcpy( dev_N, N, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } printf("\n\nMODIFIED THIS FUNCTION TO ADD THE OFFSET FOR BATCHING AND THE BATCH NUMBER\n\n"); printf("\nWITH A SINGLE BATCH-- THE BATCH OFFSET IS SET TO 1 AND THE BATCH NUMBER IS SET TO 0."); //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof( unsigned int )); *batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_offset, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch *batchOffset=1; errCode=hipMemcpy( dev_offset, batchOffset, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)); *batchNumber=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)); //allocate on the device errCode=hipMalloc((void**)&dev_batchNumber, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_batchNumber, batchNumber, sizeof(unsigned int), 
hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=hipMemcpy( dev_debug1, debug1, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_debug2, debug2, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel hipLaunchKernelGGL(( kernelGridIndex), dim3(TOTALBLOCKS), dim3(BLOCKSIZE) , 0, 0, dev_N, dev_offset, dev_batchNumber, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt, dev_database, dev_pointIDKey, dev_pointInDistValue); // errCode=hipDeviceSynchronize(); // cout <<"\n\nError from device synchronize: "<<errCode; cout <<"\n\nKERNEL LAUNCH RETURN: "<<hipGetLastError()<<endl<<endl; if ( hipSuccess != hipGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<hipSuccess<<endl<<endl; } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //dont get the result set because we leave it on the device for sorting //without transfering back to the host //first find the size of the number of results errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer /* errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting results from GPU Got error with code " << errCode << endl; } */ *totalNeighbors=(*cnt); double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) // unsigned int * debug1; // debug1=(unsigned int*)malloc(sizeof(unsigned int)); // *debug1=0; // unsigned int * debug2; // debug2=(unsigned int*)malloc(sizeof(unsigned int)); // *debug2=0; double tStartdebug=omp_get_wtime(); errCode=hipMemcpy(debug1, dev_debug1, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=hipMemcpy(debug2, dev_debug2, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } double tEnddebug=omp_get_wtime(); printf("\nTime to retrieve debug values: %f", tEnddebug - tStartdebug); /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: double tFreeStart=omp_get_wtime(); hipFree(dev_N); hipFree(dev_database); hipFree(dev_debug1); hipFree(dev_debug2); hipFree(dev_cnt); hipFree(dev_epsilon); //hipFree(dev_results); hipFree(dev_grid); hipFree(dev_lookupArr); hipFree(dev_gridNumXCells); hipFree(dev_gridNumYCells); hipFree(dev_gridMin_x); hipFree(dev_gridMin_y); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); //////////////////////////////////// //hipDeviceSynchronize(); //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions 
thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); // allocate space for the output //thrust::device_vector<int> sortedKeys(*cnt); //thrust::device_vector<int> sortedVals(*cnt); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); /* double tstartsort=omp_get_wtime(); //make a host vector initialized with the results that have been transfered from the GPU double sort_test1_start=omp_get_wtime(); //TESTING thrust::host_vector<structresults> hVectResults(results,results+(*cnt)); double sort_test1_end=omp_get_wtime(); //TESTING printf("\n Time to create the host vector: %f", sort_test1_end - sort_test1_start); //TESTING // for (int i=0;i<numResults;i++) // { // printf("\n host vector: %d, %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } // for (int i=0; i<numResults; i++) // { // structresults tmp; // tmp.pointID=0; // tmp.pointInDist=0; // hVectResults.push_back(tmp); // } //Now transfer the hostvector to the device: double sort_test2_start=omp_get_wtime(); //TESTING thrust::device_vector<structresults> dVectResults=hVectResults; double sort_test2_end=omp_get_wtime(); //TESTING printf("\n Time to create the device vector: %f", sort_test2_end - sort_test2_start); //TESTING //sort the device vector on the GPU try{ thrust::sort(dVectResults.begin(), dVectResults.end(),compareThrust()); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } // transfer the sorted results back to host thrust::copy(dVectResults.begin(), dVectResults.end(), hVectResults.begin()); double tendsort=omp_get_wtime(); printf("\nTime to sort on the GPU (grid index): %f",tendsort-tstartsort); */ //print GPU: // for (int i=0; i<(*cnt);i++) // { // printf("\nGPU elem: %d, data: %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //In this function we batch the results off of the GPU to accomodate larger epsilon values //The results that come from the GPU are in the form of key/value pairs (in two arrays) //Key-a point, Value-a point within epsilon of the key //The batches are mapped to differing streams //Each batch requires its own memory space for the result set //So the number of buffers on the GPU for the results is the number of streams (GPUSTREAMS) //On the host, we use the same size buffers, and number of them, and then build part of the neighbor table with the batch //This is an alternative to making one large array from all of the batches, which would require a large //pinned cuda malloc which is very expensive. 
It also allows for multiple threads to concurrently build the //neighbor table and interleave GPU work with work on the CPU //Also, the number of batches is estimated by calling a kernel that samples the number of neighbours (1%) and then //estimates the total neighbors, which is used to calculate the total number of batches //To make sure each batch doesn't vary much, we use a strided scheme for each batch //Uses the grid index to compute the direct neighbor table //NO SHARED MEMORY PAGING void makeDistanceTableGPUGridIndexBatches(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct table * neighborTable, unsigned int * totalNeighbors) { //testing new neighbortable: struct neighborTableLookup * newNeighborTable; newNeighborTable=new neighborTableLookup[dataPoints->size()]; double tKernelResultsStart=omp_get_wtime(); //CUDA error code: hipError_t errCode; cout<<"\n** last error start of fn: "<<hipGetLastError(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); struct point * database; //pinned memory for the database: database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //dont use pinned memory for the database, its slower than using hipMalloc //hipHostMalloc((void **) &database, sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); printf("\n !!in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database //we do this because the data points struct may contain other values than x and y for (int i=0; i<(*DBSIZE); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=hipMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != hipSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=hipMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } printf("\nSize of index sent to GPU (GiB): 
%f", (double)sizeof(struct grid)*totalGridCells/(1024.0*1024.0*1024.0)); /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_lookupArr, sizeof(int)*(*DBSIZE)); if(errCode != hipSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=hipMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //total size of the result set as it's batched //this isnt sent to the GPU unsigned int * 
totalResultSetCnt; totalResultSetCnt=(unsigned int*)malloc(sizeof(unsigned int)); *totalResultSetCnt=0; //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); // if(errCode != hipSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //copy to device errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << 
errCode << endl; } //set to 0 //copy debug to device errCode=hipMemcpy( dev_debug1, debug1, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_debug2, debug2, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } ////////////////////////////////////////////////////////// //ESTIMATE THE BUFFER SIZE AND NUMBER OF BATCHES ETC BY COUNTING THE NUMBER OF RESULTS //TAKE A SAMPLE OF THE DATA POINTS, NOT ALL OF THEM //Use sampleRate for this ///////////////////////////////////////////////////////// printf("\n\n***********************************\nEstimating Batches:"); //Parameters for the batch size estimation. double sampleRate=0.01; //sample 1% of the points in the dataset sampleRate=0.01. //Sample the entire dataset(no sampling) sampleRate=1 int offsetRate=1.0/sampleRate; printf("\nOffset: %d", offsetRate); ///////////////// //N-threads //////////////// double tstartbatchest=omp_get_wtime(); unsigned int * dev_N_batchEst; dev_N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * N_batchEst; N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *N_batchEst=*DBSIZE*sampleRate; //allocate on the device errCode=hipMalloc((void**)&dev_N_batchEst, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_N_batchEst Got error with code " << errCode << endl; } //copy N to device //N IS THE NUMBER OF THREADS errCode=hipMemcpy( dev_N_batchEst, N_batchEst, sizeof(unsigned int), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: N batchEST Got error with code " << errCode << endl; } ///////////// //count the result set size //////////// unsigned int * dev_cnt_batchEst; dev_cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * cnt_batchEst; cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *cnt_batchEst=0; //allocate on the device errCode=hipMalloc((void**)&dev_cnt_batchEst, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } //copy cnt to device errCode=hipMemcpy( dev_cnt_batchEst, cnt_batchEst, sizeof(unsigned int), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } ////////////////// //SAMPLE OFFSET - TO SAMPLE THE DATA TO ESTIMATE THE TOTAL NUMBER OF KEY VALUE PAIRS ///////////////// //offset into the database when batching the results unsigned int * sampleOffset; sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); *sampleOffset=offsetRate; unsigned int * dev_sampleOffset; dev_sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); //allocate on the device errCode=hipMalloc((void**)&dev_sampleOffset, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: sample offset Got error with code " << errCode << endl; } //copy offset to device errCode=hipMemcpy( dev_sampleOffset, sampleOffset, sizeof(unsigned int), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } const int TOTALBLOCKSBATCHEST=ceil((1.0*(*DBSIZE)*sampleRate)/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKSBATCHEST); hipLaunchKernelGGL(( kernelGridIndexBatchEstimator), dim3(TOTALBLOCKSBATCHEST), dim3(BLOCKSIZE), 0, 0, dev_N_batchEst, dev_sampleOffset, dev_debug1, dev_debug2, dev_epsilon, 
dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt_batchEst, dev_database); cout<<"\n** ERROR FROM KERNEL LAUNCH OF BATCH ESTIMATOR: "<<hipGetLastError(); // find the size of the number of results errCode=hipMemcpy( cnt_batchEst, dev_cnt_batchEst, sizeof(unsigned int), hipMemcpyDeviceToHost); if(errCode != hipSuccess) { cout << "\nError: getting cnt for batch estimate from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size for estimating the number of batches (sampled): %u",*cnt_batchEst); } hipFree(dev_cnt_batchEst); hipFree(dev_N_batchEst); hipFree(dev_sampleOffset); double tendbatchest=omp_get_wtime(); printf("\nTime to get the total result set size from batch estimator: %f",tendbatchest-tstartbatchest); //WE CALCULATE THE BUFFER SIZES AND NUMBER OF BATCHES unsigned int GPUBufferSize=100000000; double alpha=0.05; //overestimation factor unsigned long long estimatedTotalSize=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate; unsigned long long estimatedTotalSizeWithAlpha=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate*(1.0+(alpha)); printf("\nEstimated total result set size: %llu", estimatedTotalSize); printf("\nEstimated total result set size (with Alpha %f): %llu", alpha,estimatedTotalSizeWithAlpha); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize*(1.0+(alpha*2.0))/(GPUSTREAMS); //we do 2*alpha for small datasets because the //sampling will be worse for small datasets //but we fix the 3 streams still (thats why divide by 3). } unsigned int numBatches=ceil(((1.0+alpha)*estimatedTotalSize*1.0)/(GPUBufferSize*1.0)); printf("\nNumber of batches: %d, buffer size: %d", numBatches, GPUBufferSize); printf("\nEnd Batch Estimator\n***********************************\n"); ///////////////////////////////////////////////////////// //END BATCH ESTIMATOR ///////////////////////////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING THE BATCH ESTIMATOR /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=hipMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int 
* pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { hipHostMalloc((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); hipHostMalloc((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // hipMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // hipMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// hipStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //hipStreamCreate(&stream[i]); hipStreamCreateWithFlags(&stream[i], hipStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) num_threads(GPUSTREAMS) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\ntid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=hipMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=hipMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=hipMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=hipMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool hipLaunchKernelGGL(( kernelGridIndex), dim3(TOTALBLOCKS), dim3(BLOCKSIZE), 0, stream[tid] , &dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, &dev_cnt[tid], dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); // errCode=hipDeviceSynchronize(); // cout <<"\n\nError from device synchronize: "<<errCode; cout <<"\n\nKERNEL LAUNCH RETURN: "<<hipGetLastError()<<endl<<endl; if ( hipSuccess != hipGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<hipSuccess<<endl<<endl; } // find the size of the number of results errCode=hipMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), hipMemcpyDeviceToHost, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::hip::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //#pragma omp critical //{ //copy the sorted arays back to the host //copy to the appropriate place in the larger host arrays //original thrust copy (doesnt have streams) //thrust::copy(dev_keys_ptr,dev_keys_ptr+cnt[tid],pointIDKey+(*totalResultSetCnt)); //thrust::copy(dev_data_ptr,dev_data_ptr+cnt[tid],pointInDistValue+(*totalResultSetCnt)); //thrust with streams (but into one big buffer) //copy the data back using the streams //hipMemcpyAsync(thrust::raw_pointer_cast(pointIDKey+(*totalResultSetCnt)), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //hipMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue+(*totalResultSetCnt)), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //thrust with streams into individual buffers for each batch //hipMemcpyAsync(thrust::raw_pointer_cast(batchedResultSet[i].pointIDKey), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //hipMemcpyAsync(thrust::raw_pointer_cast(batchedResultSet[i].pointInDistValue), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //thrust with streams (but into one big buffer) where each batch can write to a different spot in the big buffer //as the big array is chunked into the gpu batch size //copy the data back using the streams //FOR PINNED MEMORY //hipMemcpyAsync(thrust::raw_pointer_cast(pointIDKey+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //hipMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //thrust with streams (but into one big buffer) where each batch can write to a different spot in the big buffer //as the big array is chunked into the gpu batch size //FOR PAGED MEMORY -- cant use streams // hipMemcpy(thrust::raw_pointer_cast(pointIDKey+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost); // 
hipMemcpy(thrust::raw_pointer_cast(pointInDistValue+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost); //thrust with streams into individual buffers for each batch hipMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); hipMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table hipStreamSynchronize(stream[tid]); double tableconstuctstart=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey[tid], pointInDistValue[tid], neighborTable, &cnt[tid]); double tableconstuctend=omp_get_wtime(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %u", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) // unsigned int * debug1; // debug1=(unsigned int*)malloc(sizeof(unsigned int)); // *debug1=0; // unsigned int * debug2; // debug2=(unsigned int*)malloc(sizeof(unsigned int)); // *debug2=0; double tStartdebug=omp_get_wtime(); errCode=hipMemcpy(debug1, dev_debug1, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=hipMemcpy(debug2, dev_debug2, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } double tEnddebug=omp_get_wtime(); printf("\nTime to retrieve debug values: %f", tEnddebug - tStartdebug); /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: double tFreeStart=omp_get_wtime(); //destroy streams // for (int i=0; i<GPUSTREAMS; i++) // { // hipStreamDestroy(stream[i]); // } for (int i=0; i<GPUSTREAMS; i++) { errCode=hipStreamDestroy(stream[i]); if(errCode != hipSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } //free the data on the device hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); hipFree(dev_database); hipFree(dev_debug1); hipFree(dev_debug2); hipFree(dev_epsilon); hipFree(dev_grid); hipFree(dev_lookupArr); hipFree(dev_gridNumXCells); hipFree(dev_gridNumYCells); hipFree(dev_gridMin_x); hipFree(dev_gridMin_y); hipFree(dev_N); hipFree(dev_cnt); hipFree(dev_offset); hipFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free 
the data on the device hipFree(dev_pointIDKey[i]); hipFree(dev_pointInDistValue[i]); //free on the host hipHostFree(pointIDKey[i]); hipHostFree(pointInDistValue[i]); // errCode=hipMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); // if(errCode != hipSuccess) { // cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory // } // errCode=hipMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); // if(errCode != hipSuccess) { // cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory // } } hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //free pinned memory on host hipHostFree(pointIDKey); hipHostFree(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); // printf("\nreturning before constructing table (which is commented)"); // return; //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// //NOW CONSTRUCT THE TABLE PARTIALLY WHEN MAKING THE BATCHES // double tStartTableConstruct=omp_get_wtime(); // constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, totalResultSetCnt); // double tEndTableConstruct=omp_get_wtime(); // printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //print table: /* int tmpcnt=0; printf("\nGrid GPU Table**********"); for (int i=0; i<(*DBSIZE); i++) { printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); //sort so it has the same output: std::sort(neighborTable[i].neighbors.begin(),neighborTable[i].neighbors.end()); for (int j=0; j<neighborTable[i].neighbors.size();j++) { printf("%d, ",neighborTable[i].neighbors[j]); tmpcnt++; } } printf("\n count elems: %d", tmpcnt); */ //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// //printf("\ntotal neighbors (in batched fn): %d", *totalNeighbors); cout<<"\n** last error at end of fn batches: "<<hipGetLastError(); // printf("\nExiting function early.."); // return; } //In this function we batch the results off of the GPU to accomodate larger epsilon values //The results that come from the GPU are in the form of key/value pairs (in two arrays) //Key-a point, Value-a point within epsilon of the key //The batches are mapped to differing streams //Each batch requires its own memory space for the result set //So the number of buffers on the GPU for the results is the number of streams (GPUSTREAMS) //On the host, we use the same size buffers, and number of them, and then build part of the neighbor table with the batch //This is an alternative to making one large array from all of the batches, which would require a large //pinned cuda malloc which is very expensive. 
It also allows for multiple threads to concurrently build the //neighbor table and interleave GPU work with work on the CPU //Also, the number of batches is estimated by calling a kernel that samples the number of neighbours (1%) and then //estimates the total neighbors, which is used to calculate the total number of batches //To make sure each batch doesn't vary much, we use a strided scheme for each batch //Uses the grid index to compute the direct neighbor table //NO SHARED MEMORY PAGING void makeDistanceTableGPUGridIndexBatchesAlternateTest(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct neighborTableLookup * neighborTable, std::vector<struct neighborDataPtrs> * pointersToNeighbors, unsigned int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); //CUDA error code: hipError_t errCode; cout<<"\n** last error start of fn: "<<hipGetLastError(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); struct point * database; //pinned memory for the database: database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //dont use pinned memory for the database, its slower than using hipMalloc //hipHostMalloc((void **) &database, sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); printf("\n !!in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database //we do this because the data points struct may contain other values than x and y for (int i=0; i<(*DBSIZE); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=hipMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != hipSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=hipMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } printf("\nSize of index sent to GPU (GiB): %f", (double)sizeof(struct grid)*totalGridCells/(1024.0*1024.0*1024.0)); /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP 
ARRAY TO THE GPU /////////////////////////////////// int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_lookupArr, sizeof(int)*(*DBSIZE)); if(errCode != hipSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=hipMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=hipMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //total size of the result set as it's batched //this isnt sent to the GPU unsigned int * totalResultSetCnt; totalResultSetCnt=(unsigned int*)malloc(sizeof(unsigned int)); *totalResultSetCnt=0; //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned 
int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); // if(errCode != hipSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //copy to device errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=hipMemcpy( dev_debug1, debug1, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << 
endl; } errCode=hipMemcpy( dev_debug2, debug2, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } ////////////////////////////////////////////////////////// //ESTIMATE THE BUFFER SIZE AND NUMBER OF BATCHES ETC BY COUNTING THE NUMBER OF RESULTS //TAKE A SAMPLE OF THE DATA POINTS, NOT ALL OF THEM //Use sampleRate for this ///////////////////////////////////////////////////////// printf("\n\n***********************************\nEstimating Batches:"); //Parameters for the batch size estimation. double sampleRate=0.01; //sample 1% of the points in the dataset sampleRate=0.01. //Sample the entire dataset(no sampling) sampleRate=1 int offsetRate=1.0/sampleRate; printf("\nOffset: %d", offsetRate); ///////////////// //N-threads //////////////// double tstartbatchest=omp_get_wtime(); unsigned int * dev_N_batchEst; dev_N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * N_batchEst; N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *N_batchEst=*DBSIZE*sampleRate; //allocate on the device errCode=hipMalloc((void**)&dev_N_batchEst, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_N_batchEst Got error with code " << errCode << endl; } //copy N to device //N IS THE NUMBER OF THREADS errCode=hipMemcpy( dev_N_batchEst, N_batchEst, sizeof(unsigned int), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: N batchEST Got error with code " << errCode << endl; } ///////////// //count the result set size //////////// unsigned int * dev_cnt_batchEst; dev_cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * cnt_batchEst; cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *cnt_batchEst=0; //allocate on the device errCode=hipMalloc((void**)&dev_cnt_batchEst, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } //copy cnt to device errCode=hipMemcpy( dev_cnt_batchEst, cnt_batchEst, sizeof(unsigned int), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } ////////////////// //SAMPLE OFFSET - TO SAMPLE THE DATA TO ESTIMATE THE TOTAL NUMBER OF KEY VALUE PAIRS ///////////////// //offset into the database when batching the results unsigned int * sampleOffset; sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); *sampleOffset=offsetRate; unsigned int * dev_sampleOffset; dev_sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); //allocate on the device errCode=hipMalloc((void**)&dev_sampleOffset, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: sample offset Got error with code " << errCode << endl; } //copy offset to device errCode=hipMemcpy( dev_sampleOffset, sampleOffset, sizeof(unsigned int), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } const int TOTALBLOCKSBATCHEST=ceil((1.0*(*DBSIZE)*sampleRate)/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKSBATCHEST); hipLaunchKernelGGL(( kernelGridIndexBatchEstimator), dim3(TOTALBLOCKSBATCHEST), dim3(BLOCKSIZE), 0, 0, dev_N_batchEst, dev_sampleOffset, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt_batchEst, dev_database); cout<<"\n** ERROR FROM KERNEL LAUNCH OF BATCH ESTIMATOR: "<<hipGetLastError(); // find the size of the number 
of results errCode=hipMemcpy( cnt_batchEst, dev_cnt_batchEst, sizeof(unsigned int), hipMemcpyDeviceToHost); if(errCode != hipSuccess) { cout << "\nError: getting cnt for batch estimate from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size for estimating the number of batches (sampled): %u",*cnt_batchEst); } hipFree(dev_cnt_batchEst); hipFree(dev_N_batchEst); hipFree(dev_sampleOffset); double tendbatchest=omp_get_wtime(); printf("\nTime to get the total result set size from batch estimator: %f",tendbatchest-tstartbatchest); //WE CALCULATE THE BUFFER SIZES AND NUMBER OF BATCHES unsigned int GPUBufferSize=100000000; double alpha=0.05; //overestimation factor unsigned long long estimatedTotalSize=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate; unsigned long long estimatedTotalSizeWithAlpha=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate*(1.0+(alpha)); printf("\nEstimated total result set size: %llu", estimatedTotalSize); printf("\nEstimated total result set size (with Alpha %f): %llu", alpha,estimatedTotalSizeWithAlpha); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize*(1.0+(alpha*2.0))/(GPUSTREAMS); //we do 2*alpha for small datasets because the //sampling will be worse for small datasets //but we fix the 3 streams still (thats why divide by 3). } unsigned int numBatches=ceil(((1.0+alpha)*estimatedTotalSize*1.0)/(GPUBufferSize*1.0)); printf("\nNumber of batches: %d, buffer size: %d", numBatches, GPUBufferSize); printf("\nEnd Batch Estimator\n***********************************\n"); ///////////////////////////////////////////////////////// //END BATCH ESTIMATOR ///////////////////////////////////////////////////////// /////////////////// //ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// //THE NUMBER OF POINTERS IS EQUAL TO THE NUMBER OF BATCHES for (int i=0; i<numBatches; i++) { int *ptr; struct neighborDataPtrs tmpStruct; tmpStruct.dataPtr=ptr; tmpStruct.sizeOfDataArr=0; pointersToNeighbors->push_back(tmpStruct); } /////////////////// //END ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING THE BATCH ESTIMATOR /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=hipMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the 
host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { hipHostMalloc((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); hipHostMalloc((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // hipMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // hipMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// hipStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //hipStreamCreate(&stream[i]); hipStreamCreateWithFlags(&stream[i], hipStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
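//Illustrative sketch (hypothetical numbers, not taken from the source): with, say,
//*DBSIZE=1000003 and numBatches=4, the sizing computed just below gives
//	batchSize              = 1000003/4 = 250000   (integer division, the lower value)
//	batchesThatHaveOneMore = 1000003 - 250000*4 = 3
//so batches 0,1,2 each process 250001 points and batch 3 processes 250000,
//which together cover all 1000003 points exactly once.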
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) num_threads(GPUSTREAMS) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\ntid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=hipMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=hipMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=hipMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=hipMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool hipLaunchKernelGGL(( kernelGridIndex), dim3(TOTALBLOCKS), dim3(BLOCKSIZE), 0, stream[tid] , &dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, &dev_cnt[tid], dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); // errCode=hipDeviceSynchronize(); // cout <<"\n\nError from device synchronize: "<<errCode; cout <<"\n\nKERNEL LAUNCH RETURN: "<<hipGetLastError()<<endl<<endl; if ( hipSuccess != hipGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<hipSuccess<<endl<<endl; } // find the size of the number of results errCode=hipMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), hipMemcpyDeviceToHost, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::hip::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //thrust with streams into individual buffers for each batch hipMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); hipMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table hipStreamSynchronize(stream[tid]); double tableconstuctstart=omp_get_wtime(); //set the number of neighbors in the pointer struct: (*pointersToNeighbors)[i].sizeOfDataArr=cnt[tid]; (*pointersToNeighbors)[i].dataPtr=new int[cnt[tid]]; constructNeighborTableKeyValueAlternateTest(pointIDKey[tid], pointInDistValue[tid], neighborTable, (*pointersToNeighbors)[i].dataPtr, &cnt[tid]); //cout <<"\nIn make neighbortable. 
Data array ptr: "<<(*pointersToNeighbors)[i].dataPtr<<" , size of data array: "<<(*pointersToNeighbors)[i].sizeOfDataArr;cout.flush(); double tableconstuctend=omp_get_wtime(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %u", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) // unsigned int * debug1; // debug1=(unsigned int*)malloc(sizeof(unsigned int)); // *debug1=0; // unsigned int * debug2; // debug2=(unsigned int*)malloc(sizeof(unsigned int)); // *debug2=0; double tStartdebug=omp_get_wtime(); errCode=hipMemcpy(debug1, dev_debug1, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=hipMemcpy(debug2, dev_debug2, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } double tEnddebug=omp_get_wtime(); printf("\nTime to retrieve debug values: %f", tEnddebug - tStartdebug); /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: double tFreeStart=omp_get_wtime(); //destroy streams // for (int i=0; i<GPUSTREAMS; i++) // { // hipStreamDestroy(stream[i]); // } for (int i=0; i<GPUSTREAMS; i++) { errCode=hipStreamDestroy(stream[i]); if(errCode != hipSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } //free the data on the device hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); hipFree(dev_database); hipFree(dev_debug1); hipFree(dev_debug2); hipFree(dev_epsilon); hipFree(dev_grid); hipFree(dev_lookupArr); hipFree(dev_gridNumXCells); hipFree(dev_gridNumYCells); hipFree(dev_gridMin_x); hipFree(dev_gridMin_y); hipFree(dev_N); hipFree(dev_cnt); hipFree(dev_offset); hipFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free the data on the device hipFree(dev_pointIDKey[i]); hipFree(dev_pointInDistValue[i]); //free on the host hipHostFree(pointIDKey[i]); hipHostFree(pointInDistValue[i]); } hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //free pinned memory on host hipHostFree(pointIDKey); hipHostFree(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); //printf("\ntotal neighbors (in batched fn): %d", *totalNeighbors); cout<<"\n** last error at end of fn batches: "<<hipGetLastError(); // printf("\nExiting function early.."); // return; } //void constructNeighborTableKeyValueAlternateTest(int * pointIDKey, int * 
pointInDistValue, struct neighborTableLookup * neighborTable, int * pointersToNeighbors, unsigned int * cnt); void constructNeighborTableKeyValueAlternateTest(int * pointIDKey, int * pointInDistValue, struct neighborTableLookup * neighborTable, int * pointersToNeighbors, unsigned int * cnt) { //need to take as input a pointer to an array of integers that has not been allocated yet (neighborTableData), 1 per batch (2D array) // int * ptrToData; // pointersToNeighbors.push_back() //allocate memory to the array that holds all of the direct neighbors: //pointersToNeighbors= new int[*cnt]; //record the size of the array // pointersToNeighbors->sizeOfDataArr=*cnt; //copy the value data: std::copy(pointInDistValue, pointInDistValue+(*cnt), pointersToNeighbors); // printf("\nTest copy: "); // int sample=ceil((*cnt)*0.001); // for (int i=0; i<sample; i++) // { // printf("\nval: %d",pointersToNeighbors[i]); // } //Step 1: find all of the unique keys and their positions in the key array //double tstart=omp_get_wtime(); unsigned int numUniqueKeys=0; struct keyData{ int key; int position; }; std::vector<keyData> uniqueKeyData; keyData tmp; tmp.key=pointIDKey[0]; tmp.position=0; uniqueKeyData.push_back(tmp); //we assign the ith data item when iterating over i+1th data item, //so we go 1 loop iteration beyond the number (*cnt) for (int i=1; i<(*cnt)+1; i++) { if (pointIDKey[i-1]!=pointIDKey[i]) { numUniqueKeys++; tmp.key=pointIDKey[i]; tmp.position=i; uniqueKeyData.push_back(tmp); } } //insert into the neighbor table the values based on the positions of //the unique keys obtained above. for (int i=0; i<uniqueKeyData.size()-1; i++) { int keyElem=uniqueKeyData[i].key; neighborTable[keyElem].pointID=keyElem; neighborTable[keyElem].indexmin=uniqueKeyData[i].position; neighborTable[keyElem].indexmax=uniqueKeyData[i+1].position-1; //update the pointer to the data array for the values neighborTable[keyElem].dataPtr=pointersToNeighbors; } /* //newer multithreaded way: //Step 1: find all of the unique keys and their positions in the key array //double tstart=omp_get_wtime(); unsigned int numUniqueKeys=0; unsigned int count=0; struct keyData{ int key; int position; }; std::vector<keyData> uniqueKeyData; keyData tmp; tmp.key=pointIDKey[0]; tmp.position=0; uniqueKeyData.push_back(tmp); //we assign the ith data item when iterating over i+1th data item, //so we go 1 loop iteration beyond the number (*cnt) for (int i=1; i<(*cnt)+1; i++) { if (pointIDKey[i-1]!=pointIDKey[i]) { numUniqueKeys++; tmp.key=pointIDKey[i]; tmp.position=i; uniqueKeyData.push_back(tmp); } } //Step 2: In parallel, insert into the neighbor table the values based on the positions of //the unique keys obtained above. 
Since multiple threads access this function, we don't want to oversubscribe the //machine with nested parallelism, so limit to 2 threads omp_set_nested(1); #pragma omp parallel for reduction(+:count) num_threads(2) schedule(static,1) for (int i=0; i<uniqueKeyData.size()-1; i++) { int keyElem=uniqueKeyData[i].key; int valStart=uniqueKeyData[i].position; int valEnd=uniqueKeyData[i+1].position-1; int size=valEnd-valStart+1; //printf("\nval: start:%d, end: %d", valStart,valEnd); neighborTable[keyElem].pointID=keyElem; neighborTable[keyElem].neighbors.insert(neighborTable[keyElem].neighbors.begin(),&pointInDistValue[valStart],&pointInDistValue[valStart+size]); //printf("\ni: %d, keyElem: %d, position start: %d, position end: %d, size: %d", i,keyElem,valStart, valEnd,size); count+=size; } */ } void constructNeighborTableKeyValue(int * pointIDKey, int * pointInDistValue, struct table * neighborTable, unsigned int * cnt) { //newer multithreaded way: //Step 1: find all of the unique keys and their positions in the key array //double tstart=omp_get_wtime(); unsigned int numUniqueKeys=0; unsigned int count=0; struct keyData{ int key; int position; }; std::vector<keyData> uniqueKeyData; keyData tmp; tmp.key=pointIDKey[0]; tmp.position=0; uniqueKeyData.push_back(tmp); //we assign the ith data item when iterating over i+1th data item, //so we go 1 loop iteration beyond the number (*cnt) for (int i=1; i<(*cnt)+1; i++) { if (pointIDKey[i-1]!=pointIDKey[i]) { numUniqueKeys++; tmp.key=pointIDKey[i]; tmp.position=i; uniqueKeyData.push_back(tmp); } } //Step 2: In parallel, insert into the neighbor table the values based on the positions of //the unique keys obtained above. Since multiple threads access this function, we don't want to oversubscribe the //machine with nested parallelism, so limit to 2 threads omp_set_nested(1); #pragma omp parallel for reduction(+:count) num_threads(2) schedule(static,1) for (int i=0; i<uniqueKeyData.size()-1; i++) { int keyElem=uniqueKeyData[i].key; int valStart=uniqueKeyData[i].position; int valEnd=uniqueKeyData[i+1].position-1; int size=valEnd-valStart+1; //printf("\nval: start:%d, end: %d", valStart,valEnd); neighborTable[keyElem].pointID=keyElem; neighborTable[keyElem].neighbors.insert(neighborTable[keyElem].neighbors.begin(),&pointInDistValue[valStart],&pointInDistValue[valStart+size]); //printf("\ni: %d, keyElem: %d, position start: %d, position end: %d, size: %d", i,keyElem,valStart, valEnd,size); count+=size; } } void constructNeighborTable(thrust::host_vector<structresults> * hVectResults, struct table * neighborTable, unsigned int * cnt) { //original way: // for (unsigned int i=0; i<(*cnt); i++) // { // unsigned int elemID=hVectResults[i].pointID; // neighborTable[elemID].pointID=elemID; // neighborTable[elemID].neighbors.push_back(hVectResults[i].pointInDist); // } //end original way //new way: loop over and find the ranges of the different point ids //then make one insert into the vector unsigned int lastElemID=(*hVectResults)[0].pointID; unsigned int lastIndex=0; //we assign the ith data item when iterating over i+1th data item, //so we go 1 loop iteration beyond the number (*cnt) for (unsigned int i=1; i<(*cnt)+1; i++) { if ((*hVectResults)[i].pointID!=lastElemID) { unsigned int rangemax=i-1; int tmpSize=rangemax-lastIndex+1; unsigned int tmp[tmpSize]; for (int j=lastIndex; j<=rangemax; j++) { tmp[j-lastIndex]=(*hVectResults)[j].pointInDist; } neighborTable[lastElemID].pointID=lastElemID; 
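			//Illustrative sketch of the run detection above (hypothetical values, not from the original data):
			//if the sorted stream of pointIDs is [4,4,4,9,9], then when i first reaches a 9 we have
			//lastElemID=4, lastIndex=0 and rangemax=i-1=2, so tmp holds the three pointInDist values
			//belonging to point 4, and the single insert() below appends them to neighborTable[4].neighbors
			//before lastElemID/lastIndex are advanced to the run of 9s.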
neighborTable[lastElemID].neighbors.insert(neighborTable[lastElemID].neighbors.begin(),tmp, tmp+tmpSize); //update the new last elem id lastElemID=(*hVectResults)[i].pointID; lastIndex=i; } } //print table: /* int tmpcnt=0; printf("\nGrid GPU Table**********"); for (int i=0; i<(*N); i++) { printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); for (int j=0; j<neighborTable[i].neighbors.size();j++) { printf("%d, ",neighborTable[i].neighbors[j]); tmpcnt++; } } printf("\n count elems: %d", tmpcnt); */ } //Uses a brute force kernel to calculate the direct neighbors of the points in the database void makeDistanceTableGPUBruteForce(std::vector<struct dataElem> * dataPoints, double * epsilon, struct table * neighborTable, int * totalNeighbors) { //CUDA error code: hipError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=hipMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // hipHostMalloc((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // hipHostMalloc((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? 
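	//Sketch of the pinned-memory alternative (assumption: same buffer sizes as below; the commented-out
	//hipHostMalloc calls above are the equivalent in this file). Page-locked host buffers allow
	//hipMemcpyAsync to overlap copies with kernel execution, at the cost of a slower allocation:
	//	errCode=hipHostMalloc((void **)&pointIDKey, sizeof(int)*BUFFERELEM);
	//	errCode=hipHostMalloc((void **)&pointInDistValue, sizeof(int)*BUFFERELEM);
	//	//... use the buffers, then release them with hipHostFree(pointIDKey); hipHostFree(pointInDistValue);
	//For this single-kernel (non-batched) path, the plain pageable malloc below is sufficient.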
pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; unsigned int * dev_debug2; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //copy N, epsilon and cnt to the device //epsilon errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //N (DATASET SIZE) errCode=hipMemcpy( dev_N, N, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel hipLaunchKernelGGL(( kernelBruteForce), dim3(TOTALBLOCKS), dim3(BLOCKSIZE) , 0, 0, dev_N, dev_debug1, dev_debug2, dev_epsilon, dev_cnt, dev_database, dev_pointIDKey, dev_pointInDistValue); if ( hipSuccess != hipGetLastError() ){ printf( "Error in kernel launch!\n" ); } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { 
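		//(sketch) the numeric code can also be printed in readable form, e.g.:
		//	cout << " (" << hipGetErrorString(errCode) << ")";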
cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on within epsilon: %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer // errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting results from GPU Got error with code " << errCode << endl; // } *totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) unsigned int * debug1; debug1=(unsigned int*)malloc(sizeof(unsigned int)); *debug1=0; unsigned int * debug2; debug2=(unsigned int*)malloc(sizeof(unsigned int)); *debug2=0; errCode=hipMemcpy(debug1, dev_debug1, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=hipMemcpy(debug2, dev_debug2, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: hipFree(dev_database); hipFree(dev_debug1); hipFree(dev_debug2); hipFree(dev_cnt); hipFree(dev_epsilon); //hipFree(dev_results); //////////////////////////////////// //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //print table: // for (int i=0; i<(*N); i++) // { // printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); // for (int j=0; j<neighborTable[i].neighbors.size();j++) // { // 
// 		printf("%d, ",neighborTable[i].neighbors[j]);
// 	}
// }

	////////////////////////////////////
	//END CONSTRUCT TABLE
	////////////////////////////////////

}

//generates an array and lookup table for the GPU, from individual pointers to the neighbors from a previous table's results
//This one uses the new implementation that doesn't use vectors
//Input:
//numPoints
//inputNeighborTable
//OLD: dataPtr - vector of pointers to the arrays containing the neighbors across the previous batches in the neighborTableLookup
//std::vector<struct neighborDataPtrs> *dataPtr,
//Outputs:
//directNeighborArray -- the ids of the points within epsilon of the input table
//gpuLookupArray -- points to the indices of the elements in directNeighborArray
void generateNeighborArrayForGPUAlternative(unsigned int numPoints, struct neighborTableLookup * inputNeighborTable, int * directNeighborArray, struct gpulookuptable * gpuLookupArray)
{
	//populate the direct neighbor array
	//and the lookup array at the same time
	//This is because the neighbors for each data point are stored across the various arrays allocated for each batch
	int startIndex=0;
	for (int i=0; i<numPoints; i++)
	{
		int indexmin=inputNeighborTable[i].indexmin;
		int indexmax=inputNeighborTable[i].indexmax;
		int * data= (inputNeighborTable[i].dataPtr)+indexmin;
		int sizeRange=indexmax-indexmin+1;
		//printf("\nIteration: %d, Start index: %d, sizeRange: %d",i,startIndex,sizeRange);
		//std::copy(data+indexmin, data+sizeRange, directNeighborArray+startIndex);
		std::copy(data, data+sizeRange, directNeighborArray+startIndex);
		gpuLookupArray[i].indexmin=startIndex;
		gpuLookupArray[i].indexmax=startIndex+sizeRange-1;
		startIndex+=sizeRange;
	}
}

//generates an array and lookup table for the GPU. This is because we can't use vectors on the GPU.
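//Illustrative layout (hypothetical neighbor lists, not from the original data): if point 0 has
//neighbors {1,2} and point 1 has neighbors {0,2,3}, the flattened representation built by the
//functions here is
//	directNeighborArray = [1, 2, 0, 2, 3]
//	gpuLookupArray[0] = {indexmin=0, indexmax=1}
//	gpuLookupArray[1] = {indexmin=2, indexmax=4}
//so a GPU thread for point i scans directNeighborArray[gpuLookupArray[i].indexmin .. indexmax].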
void generateNeighborArrayForGPU(unsigned int numPoints, struct table * inputNeighborTable, int * directNeighborArray, struct gpulookuptable * gpuLookupArray) { int startIndex=0; unsigned int cnt=0; for (int i=0; i<numPoints; i++) { startIndex=cnt; for (int j=0; j<inputNeighborTable[i].neighbors.size(); j++) { directNeighborArray[cnt]=inputNeighborTable[i].neighbors[j]; cnt++; } gpuLookupArray[i].indexmin=startIndex; gpuLookupArray[i].indexmax=cnt-1; } } void generateDistanceTableFromPreviousTable(std::vector<struct dataElem> * dataPoints, struct gpulookuptable * gpuLookupArray, int * directNeighborArray, int * totalDirectNeighbors, double * epsilon, struct table * neighborTable) { printf("\nIn generate from previous table:\nDatapoints: %lu, \nTotal direct neighbors: %d\n",dataPoints->size(), *totalDirectNeighbors); cout<<"\n** Last CUDA error start of fn: "<<hipGetLastError(); //CUDA error code: hipError_t errCode; unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in generate previous table GPU method: N is: %u",*N);cout.flush(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); if(errCode != hipSuccess) { cout << "\nError: database (in previous table method) Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database memcopy (in previous table method) Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////// //Copy the lookup array struct to the GPU: /////////////////////////////// struct gpulookuptable * dev_gpuLookupArray; dev_gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*(*N)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_gpuLookupArray, sizeof(struct gpulookuptable)*(*N)); if(errCode != hipSuccess) { cout << "\nError: gpu lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=hipMemcpy(dev_gpuLookupArray, gpuLookupArray, sizeof(struct gpulookuptable)*(*N), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy the lookup array struct to the GPU: /////////////////////////////// /////////////////////////////// //Copy direct neighbor array to the GPU: /////////////////////////////// int * dev_directNeighborArray; dev_directNeighborArray=(int*)malloc(sizeof(int)*(*totalDirectNeighbors)); //allocate memory on device for the direct neighbor array: errCode=hipMalloc( (void**)&dev_directNeighborArray, sizeof(int)*(*totalDirectNeighbors)); if(errCode != hipSuccess) { cout << "\nError: gpu direct neighbor array Got error with code " << errCode << endl; cout.flush(); } //copy direct neighbor array to the device: 
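	//Once on the device, this array is expected to be consumed by calcNeighborsFromTableKernel roughly as
	//follows (a sketch only -- the kernel itself is defined elsewhere in this project): one thread per point i,
	//	for (k = gpuLookupArray[i].indexmin; k <= gpuLookupArray[i].indexmax; k++) {
	//		candidate = directNeighborArray[k];
	//		if (dist(database[i], database[candidate]) <= epsilon)   //the new, smaller epsilon
	//			append (i, candidate) to the pointIDKey/pointInDistValue buffers;
	//	}
	//i.e., the neighbor list from the larger previous epsilon is used as a superset to prune distance tests.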
errCode=hipMemcpy(dev_directNeighborArray, directNeighborArray, sizeof(int)*(*totalDirectNeighbors), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: direct neighbor array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy direct neighbor array to the GPU: /////////////////////////////// /////////////////////////////// //copy the size of the database /////////////////////////////// unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_N, N, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } /////////////////////////////// //END copy the size of the database /////////////////////////////// /////////////////////////////// //copy the newer (smaller) epsilon /////////////////////////////// double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END copy the newer (smaller) epsilon /////////////////////////////// /////////////////////////////////// //ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// // struct structresults * dev_results; // struct structresults * results; // errCode=hipMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); // if(errCode != hipSuccess) { // cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory // } // printf("\nmemory requested for results from previous table (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); // //host result allocation: // results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=hipMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not 
enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // hipHostMalloc((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // hipHostMalloc((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////// //EXECUTE KERNEL /////////////////////////////// const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks (from previous table method): %d",TOTALBLOCKS); //execute kernel hipLaunchKernelGGL(( calcNeighborsFromTableKernel), dim3(TOTALBLOCKS), dim3(BLOCKSIZE) , 0, 0, dev_N, dev_gpuLookupArray, dev_directNeighborArray, dev_cnt, dev_epsilon, dev_database, dev_pointIDKey, dev_pointInDistValue); cout <<endl<<"After kernel launch, Error code: "<<hipGetLastError()<<endl; if ( hipSuccess != hipGetLastError() ){ printf( "\nError in kernel launch (previous table method)!" ); // cout <<endl<<"Error code: "<<hipGetLastError()<<endl; } /////////////////////////////// //END EXECUTE KERNEL /////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results //errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; // } // else // { // printf("\nGPU: result set size on within epsilon: %d",*cnt); // } errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on GPU within epsilon (from precomputed table method): %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer // errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting results from GPU (from the precomputed table) Got error with code " << errCode << endl; // } //*totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// ///////////////// //FREE ///////////////// hipFree(dev_directNeighborArray); hipFree(dev_gpuLookupArray); //hipFree(dev_results); hipFree(dev_database); hipFree(dev_epsilon); hipFree(dev_N); hipFree(dev_cnt); //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE 
RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); // allocate space for the output //thrust::device_vector<int> sortedKeys(*cnt); //thrust::device_vector<int> sortedVals(*cnt); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// /* //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //////////////////////////////////// double tstartsort=omp_get_wtime(); //make a host vector initialized with the results that have been transfered from the GPU thrust::host_vector<structresults> hVectResults(results,results+(*cnt)); // for (int i=0;i<numResults;i++) // { // printf("\n host vector: %d, %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } // for (int i=0; i<numResults; i++) // { // structresults tmp; // tmp.pointID=0; // tmp.pointInDist=0; // hVectResults.push_back(tmp); // } //Now transfer the hostvector to the device: thrust::device_vector<structresults> dVectResults=hVectResults; //sort the device vector on the GPU try{ thrust::sort(dVectResults.begin(), dVectResults.end(),compareThrust()); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } // transfer the sorted results back to host thrust::copy(dVectResults.begin(), dVectResults.end(), hVectResults.begin()); double tendsort=omp_get_wtime(); printf("\nTime to sort on the GPU (from precompute table): %f",tendsort-tstartsort); //print GPU: // for (int i=0; i<(*cnt);i++) // { // printf("\nPrecompute GPU elem: %d, data: %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// for (unsigned int i=0; i<(*cnt); i++) { unsigned int index=hVectResults[i].pointID; neighborTable[index].pointID=index; neighborTable[index].neighbors.push_back(hVectResults[i].pointInDist); } */ //print table: /* printf("\n****Precompute table: "); for (int i=0; i<(*N); i++) { printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); for (int j=0; j<neighborTable[i].neighbors.size();j++) { printf("%d, ",neighborTable[i].neighbors[j]); } } */ //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //USE THIS TO MAKE A TABLE FROM A PREVIOUS TABLE WITH A HIGHER EPSILON //TAKES AS INPUT: //The data points 
(Database) gpuLookupArray //A lookup array that points to an array with the neighbors of each data point (directNeighborArray) //The total number of direct neighbors: totalDirectNeighbors //epsilon //previousEpsilon- the epsilon that made the input direct neighbors: used to estimate batch sizes for the new epsilon //The resulting neighborTable to be passed into DBSCAN //The total number of neighbors in the table //It batches the results off of the GPU. //However, if the number of direct neighbors are too large, we don't batch these on and off in addition to the resultset //We return false and generate a new neighborTable using the index and not another neighborTable) bool generateDistanceTableFromPreviousTableBatches(std::vector<struct dataElem> * dataPoints, struct gpulookuptable * gpuLookupArray, int * directNeighborArray, unsigned int * totalDirectNeighbors, double * epsilon, double * previousEpsilon, struct table * neighborTable, unsigned int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); printf("\nIn generate from previous table:\nDatapoints: %lu, \nTotal direct neighbors: %d\n",dataPoints->size(), *totalDirectNeighbors); cout<<"\n** Last CUDA error start of fn: "<<hipGetLastError(); printf("\n\nNOTE THAT SEG FAULTS ARE TYPICALLY DUE TO INSUFFICIENT BUFFER SPACE FOR THE RESULTS WHEN BATCHING\n\n"); //CUDA error code: hipError_t errCode; unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in generate previous table GPU method: DNSIZE is: %u",*DBSIZE);cout.flush(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); if(errCode != hipSuccess) { cout << "\nError: database (in previous table method) Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<*DBSIZE; i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database memcopy (in previous table method) Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////// //Copy the lookup array struct to the GPU: /////////////////////////////// struct gpulookuptable * dev_gpuLookupArray; dev_gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE)); if(errCode != hipSuccess) { cout << "\nError: gpu lookup array Got error with code " << errCode << endl; cout.flush(); } printf("\nSize of lookup table: %f (GiB)", (double)sizeof(struct gpulookuptable)*(*DBSIZE)/(1024*1024*1024)); //copy lookup array to the device: errCode=hipMemcpy(dev_gpuLookupArray, gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy the 
lookup array struct to the GPU: /////////////////////////////// /////////////////////////////// //Copy direct neighbor array to the GPU: /////////////////////////////// int * dev_directNeighborArray; dev_directNeighborArray=(int*)malloc(sizeof(int)*(*totalDirectNeighbors)); //allocate memory on device for the direct neighbor array: errCode=hipMalloc( (void**)&dev_directNeighborArray, sizeof(int)*(*totalDirectNeighbors)); if(errCode != hipSuccess) { cout << "\nError: gpu direct neighbor array Got error with code " << errCode << endl; cout.flush(); } //copy direct neighbor array to the device: errCode=hipMemcpy(dev_directNeighborArray, directNeighborArray, sizeof(int)*(*totalDirectNeighbors), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: direct neighbor array memcpy Got error with code " << errCode << endl; } printf("\nSize of direct neighbor array: %f (GiB)", (double)sizeof(int)*(*totalDirectNeighbors)/(1024*1024*1024)); /////////////////////////////// //END Copy direct neighbor array to the GPU: /////////////////////////////// /////////////////////////////// //copy the size of the database /////////////////////////////// //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } /////////////////////////////// //END copy the size of the database /////////////////////////////// /////////////////////////////// //copy the newer (smaller) epsilon /////////////////////////////// double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END copy the newer (smaller) epsilon /////////////////////////////// /////////////////////////////////// //ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) 
/////////////////////////////////// //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); // if(errCode != hipSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } /////////////////////////////////// //END ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING PREVIOUS SIZE OF NEIGHBORTABLE /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST unsigned int GPUBufferSize=100000000; double alpha=1; //overestimation factor is greater for the table because as epsilon increases, the //total number of neighbors within the epsilon neighborhood increases at a lower rate //i.e., as epsilon approaches infinity, the total number of neighbors within epsilon //becomes constant. int numBatches=0; double areaRatioNewOldEpsilon=(M_PI*(*epsilon)*(*epsilon))/(M_PI*(*previousEpsilon)*(*previousEpsilon)); unsigned int estimatedTotalSize=(*totalDirectNeighbors)*areaRatioNewOldEpsilon*(1.0+alpha); printf("\nPrevious table size: %u, area ratio of epsilons: %f, estimated total size (incl. alpha): %u", *totalDirectNeighbors, areaRatioNewOldEpsilon, estimatedTotalSize); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize/GPUSTREAMS; //but we fix the 3 streams still (thats why divide by 3). 
} numBatches=ceil(estimatedTotalSize*1.0/GPUBufferSize*1.0); printf("\n\nNumber of batches: %d, buffer size: %d\n\n", numBatches, GPUBufferSize); //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=hipMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { hipHostMalloc((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); hipHostMalloc((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // hipMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // hipMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// hipStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //hipStreamCreate(&stream[i]); hipStreamCreateWithFlags(&stream[i], hipStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; /* //////OLD NON-BATCHED const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks (from previous table method): %d",TOTALBLOCKS); //execute kernel hipLaunchKernelGGL(( calcNeighborsFromTableKernel), dim3(TOTALBLOCKS), dim3(BLOCKSIZE) , 0, 0, dev_N, dev_gpuLookupArray, dev_directNeighborArray, dev_cnt, dev_epsilon, dev_database, dev_pointIDKey, dev_pointInDistValue); cout <<endl<<"After kernel launch, Error code: "<<hipGetLastError()<<endl; if ( hipSuccess != hipGetLastError() ){ printf( "\nError in kernel launch (previous table method)!" ); // cout <<endl<<"Error code: "<<hipGetLastError()<<endl; } */ //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) num_threads(GPUSTREAMS) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\ntid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=hipMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=hipMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=hipMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=hipMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool hipLaunchKernelGGL(( calcNeighborsFromTableKernelBatches), dim3(TOTALBLOCKS), dim3(BLOCKSIZE), 0, stream[tid] , &dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_gpuLookupArray, dev_directNeighborArray, &dev_cnt[tid], dev_epsilon, dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); cout <<"\n\nKERNEL LAUNCH RETURN: "<<hipGetLastError()<<endl<<endl; if ( hipSuccess != hipGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<hipSuccess<<endl<<endl; } // find the size of the number of results errCode=hipMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), hipMemcpyDeviceToHost, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\n\nGPU: result set size within epsilon (CONSTRUCT FROM PREVIOUS NEIGHBORTABLE BATCHES): %d\n\n",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::hip::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //thrust with streams into individual buffers for each batch hipMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); hipMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table hipStreamSynchronize(stream[tid]); //construct portion of the table: double tableconstuctstart=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey[tid], pointInDistValue[tid], neighborTable, &cnt[tid]); double tableconstuctend=omp_get_wtime(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %d", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); /////////////////////////////// //END EXECUTE KERNEL /////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results //errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; // } // else // { // printf("\nGPU: result set size on within epsilon: %d",*cnt); // } /* errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on GPU within epsilon (from precomputed table method): %d",*cnt); } */ //copy the results, 
but only transfer the number of results, not the entire buffer // errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting results from GPU (from the precomputed table) Got error with code " << errCode << endl; // } //*totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// double tFreeStart=omp_get_wtime(); ///////////////// //FREE ///////////////// for (int i=0; i<GPUSTREAMS; i++) { errCode=hipStreamDestroy(stream[i]); if(errCode != hipSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } hipFree(dev_directNeighborArray); hipFree(dev_gpuLookupArray); //hipFree(dev_results); hipFree(dev_database); hipFree(dev_epsilon); hipFree(dev_N); hipFree(dev_cnt); hipFree(dev_offset); hipFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free the data on the device hipFree(dev_pointIDKey[i]); hipFree(dev_pointInDistValue[i]); //free on the host hipHostFree(pointIDKey[i]); hipHostFree(pointInDistValue[i]); } hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //free pinned memory on host hipHostFree(pointIDKey); hipHostFree(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); cout<<"\n** last error at end of fn construct table batches: "<<hipGetLastError(); return true; } //USE THIS TO MAKE A TABLE FROM A PREVIOUS TABLE WITH A HIGHER EPSILON //TAKES AS INPUT: //The data points (Database) gpuLookupArray //A lookup array that points to an array with the neighbors of each data point (directNeighborArray) //The total number of direct neighbors: totalDirectNeighbors //epsilon //previousEpsilon- the epsilon that made the input direct neighbors: used to estimate batch sizes for the new epsilon //The resulting neighborTable to be passed into DBSCAN //The total number of neighbors in the table //It batches the results off of the GPU. 
//However, if the number of direct neighbors are too large, we don't batch these on and off in addition to the resultset //We return false and generate a new neighborTable using the index and not another neighborTable) bool generateDistanceTableFromPreviousTableBatchesAlternate(std::vector<struct dataElem> * dataPoints, struct gpulookuptable * gpuLookupArray, int * directNeighborArray, unsigned int * totalDirectNeighbors, double * epsilon, double * previousEpsilon, struct neighborTableLookup * neighborTable, std::vector<struct neighborDataPtrs> * pointersToNeighbors,unsigned int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); printf("\nIn generate from previous table:\nDatapoints: %lu, \nTotal direct neighbors: %d\n",dataPoints->size(), *totalDirectNeighbors); cout<<"\n** Last CUDA error start of fn: "<<hipGetLastError(); printf("\n\nNOTE THAT SEG FAULTS ARE TYPICALLY DUE TO INSUFFICIENT BUFFER SPACE FOR THE RESULTS WHEN BATCHING\n\n"); //CUDA error code: hipError_t errCode; unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in generate previous table GPU method: DNSIZE is: %u",*DBSIZE);cout.flush(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); if(errCode != hipSuccess) { cout << "\nError: database (in previous table method) Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<*DBSIZE; i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database memcopy (in previous table method) Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////// //Copy the lookup array struct to the GPU: /////////////////////////////// struct gpulookuptable * dev_gpuLookupArray; dev_gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*(*DBSIZE)); //allocate memory on device: errCode=hipMalloc( (void**)&dev_gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE)); if(errCode != hipSuccess) { cout << "\nError: gpu lookup array Got error with code " << errCode << endl; cout.flush(); } printf("\nSize of lookup table: %f (GiB)", (double)sizeof(struct gpulookuptable)*(*DBSIZE)/(1024*1024*1024)); //copy lookup array to the device: errCode=hipMemcpy(dev_gpuLookupArray, gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: lookup array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy the lookup array struct to the GPU: /////////////////////////////// /////////////////////////////// //Copy direct neighbor array to the GPU: /////////////////////////////// int * dev_directNeighborArray; dev_directNeighborArray=(int*)malloc(sizeof(int)*(*totalDirectNeighbors)); //allocate memory on device for the direct neighbor array: errCode=hipMalloc( 
(void**)&dev_directNeighborArray, sizeof(int)*(*totalDirectNeighbors)); if(errCode != hipSuccess) { cout << "\nError: gpu direct neighbor array Got error with code " << errCode << endl; cout.flush(); } //copy direct neighbor array to the device: errCode=hipMemcpy(dev_directNeighborArray, directNeighborArray, sizeof(int)*(*totalDirectNeighbors), hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: direct neighbor array memcpy Got error with code " << errCode << endl; } printf("\nSize of direct neighbor array: %f (GiB)", (double)sizeof(int)*(*totalDirectNeighbors)/(1024*1024*1024)); /////////////////////////////// //END Copy direct neighbor array to the GPU: /////////////////////////////// /////////////////////////////// //copy the size of the database /////////////////////////////// //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=hipMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } /////////////////////////////// //END copy the size of the database /////////////////////////////// /////////////////////////////// //copy the newer (smaller) epsilon /////////////////////////////// double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //Allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END copy the newer (smaller) epsilon /////////////////////////////// /////////////////////////////////// //ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device 
errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); // if(errCode != hipSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } /////////////////////////////////// //END ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING PREVIOUS SIZE OF NEIGHBORTABLE /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST unsigned int GPUBufferSize=100000000; double alpha=0.6; //overestimation factor is greater for the table because as epsilon increases, the //total number of neighbors within the epsilon neighborhood increases at a lower rate //i.e., as epsilon approaches infinity, the total number of neighbors within epsilon //becomes constant. int numBatches=0; double areaRatioNewOldEpsilon=(M_PI*(*epsilon)*(*epsilon))/(M_PI*(*previousEpsilon)*(*previousEpsilon)); unsigned int estimatedTotalSize=(*totalDirectNeighbors)*areaRatioNewOldEpsilon*(1.0+alpha); printf("\nPrevious table size: %u, area ratio of epsilons: %f, estimated total size (incl. alpha): %u", *totalDirectNeighbors, areaRatioNewOldEpsilon, estimatedTotalSize); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize/GPUSTREAMS; //but we fix the 3 streams still (thats why divide by 3). 
} numBatches=ceil(estimatedTotalSize*1.0/GPUBufferSize*1.0); printf("\n\nNumber of batches: %d, buffer size: %d\n\n", numBatches, GPUBufferSize); //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=hipMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != hipSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { hipHostMalloc((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); hipHostMalloc((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // hipMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // hipMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////// //ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// //THE NUMBER OF POINTERS IS EQUAL TO THE NUMBER OF BATCHES for (int i=0; i<numBatches; i++) { int *ptr; struct neighborDataPtrs tmpStruct; tmpStruct.dataPtr=ptr; tmpStruct.sizeOfDataArr=0; pointersToNeighbors->push_back(tmpStruct); } /////////////////// //END ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_nested(1); omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// hipStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //hipStreamCreate(&stream[i]); hipStreamCreateWithFlags(&stream[i], hipStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; /* //////OLD NON-BATCHED const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks (from previous table method): %d",TOTALBLOCKS); //execute kernel hipLaunchKernelGGL(( calcNeighborsFromTableKernel), dim3(TOTALBLOCKS), dim3(BLOCKSIZE) , 0, 0, dev_N, dev_gpuLookupArray, dev_directNeighborArray, dev_cnt, dev_epsilon, dev_database, dev_pointIDKey, dev_pointInDistValue); cout <<endl<<"After kernel launch, Error code: "<<hipGetLastError()<<endl; if ( hipSuccess != hipGetLastError() ){ printf( "\nError in kernel launch (previous table method)!" ); // cout <<endl<<"Error code: "<<hipGetLastError()<<endl; } */ //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\nMaking table from previous, tid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //printf("\nN is: %d, tid: %d", N[tid], tid); //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=hipMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=hipMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=hipMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=hipMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), hipMemcpyHostToDevice, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool hipLaunchKernelGGL(( calcNeighborsFromTableKernelBatches), dim3(TOTALBLOCKS), dim3(BLOCKSIZE), 0, stream[tid] , &dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_gpuLookupArray, dev_directNeighborArray, &dev_cnt[tid], dev_epsilon, dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); cout <<"\n\nKERNEL LAUNCH RETURN: "<<hipGetLastError()<<endl<<endl; if ( hipSuccess != hipGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<hipSuccess<<endl<<endl; } // find the size of the number of results errCode=hipMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), hipMemcpyDeviceToHost, stream[tid] ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\n\nGPU: result set size within epsilon (CONSTRUCT FROM PREVIOUS NEIGHBORTABLE BATCHES): %d\n\n",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::hip::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //thrust with streams into individual buffers for each batch hipMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); hipMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), hipMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table hipStreamSynchronize(stream[tid]); //construct portion of the table: double tableconstuctstart=omp_get_wtime(); //constructNeighborTableKeyValue(pointIDKey[tid], pointInDistValue[tid], neighborTable, &cnt[tid]); //set the number of neighbors in the pointer struct: (*pointersToNeighbors)[i].sizeOfDataArr=cnt[tid]; (*pointersToNeighbors)[i].dataPtr=new int[cnt[tid]]; constructNeighborTableKeyValueAlternateTest(pointIDKey[tid], pointInDistValue[tid], neighborTable, (*pointersToNeighbors)[i].dataPtr, &cnt[tid]); double tableconstuctend=omp_get_wtime(); //cout <<"\nIn neighbortable from previous table. 
Data array ptr: "<<(*pointersToNeighbors)[i].dataPtr<<" , size of data array: "<<(*pointersToNeighbors)[i].sizeOfDataArr;cout.flush(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %d", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); /////////////////////////////// //END EXECUTE KERNEL /////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results //errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; // } // else // { // printf("\nGPU: result set size on within epsilon: %d",*cnt); // } /* errCode=hipMemcpy( cnt, dev_cnt, sizeof(unsigned int), hipMemcpyDeviceToHost ); if(errCode != hipSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on GPU within epsilon (from precomputed table method): %d",*cnt); } */ //copy the results, but only transfer the number of results, not the entire buffer // errCode=hipMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), hipMemcpyDeviceToHost ); // if(errCode != hipSuccess) { // cout << "\nError: getting results from GPU (from the precomputed table) Got error with code " << errCode << endl; // } //*totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// double tFreeStart=omp_get_wtime(); ///////////////// //FREE ///////////////// for (int i=0; i<GPUSTREAMS; i++) { errCode=hipStreamDestroy(stream[i]); if(errCode != hipSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } hipFree(dev_directNeighborArray); hipFree(dev_gpuLookupArray); //hipFree(dev_results); hipFree(dev_database); hipFree(dev_epsilon); hipFree(dev_N); hipFree(dev_cnt); hipFree(dev_offset); hipFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free the data on the device hipFree(dev_pointIDKey[i]); hipFree(dev_pointInDistValue[i]); //free on the host hipHostFree(pointIDKey[i]); hipHostFree(pointInDistValue[i]); } hipFree(dev_pointIDKey); hipFree(dev_pointInDistValue); //free pinned memory on host hipHostFree(pointIDKey); hipHostFree(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); cout<<"\n** last error at end of fn construct table batches: "<<hipGetLastError(); return true; } /* //METHOD TO COPY THE DATABASE TO THE GPU: //takes as input: //the imported points, but which include extraneous information (tec, time) //a pointer to the database on the GPU void copyDatabaseToGPU(std::vector<struct dataElem> * 
dataPoints, struct point * dev_database) { //CUDA error code: hipError_t errCode; // unsigned int N=dataPoints->size(); struct point * database; database=(struct point*)malloc(sizeof(struct point)*N); dev_database=(struct point*)malloc(sizeof(struct point)*N); //allocate memory on device: errCode=hipMalloc( (void**)&dev_database, sizeof(struct point)*N ); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; //2 means not enough memory } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<N; i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=hipMemcpy(dev_database, database, sizeof(struct point)*N, hipMemcpyHostToDevice); if(errCode != hipSuccess) { cout << "\nError: database Got error with code " << errCode << endl; //2 means not enough memory } } //METHOD TO SET THE KERNEL PARAMETERS void setKernelParams(unsigned int * dev_N, unsigned int * N, unsigned int * dev_debug1, unsigned int * dev_debug2, unsigned int *dev_cnt, double * dev_epsilon, double * epsilon) { //CUDA error code: hipError_t errCode; //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; //unsigned int *dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //printf("\ndev cnt in fn: %u",*dev_cnt);cout.flush(); //allocate on the device errCode=hipMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; //2 means not enough memory } //double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); *dev_epsilon=*epsilon; //allocate on the device errCode=hipMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; //2 means not enough memory } //size of the database: dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //*dev_N=N; //allocate on the device errCode=hipMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; //2 means not enough memory } //debug values //unsigned int * dev_debug1; //unsigned int * dev_debug2; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; //allocate on the device errCode=hipMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; //2 means not enough memory } errCode=hipMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != hipSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; //2 means not enough memory } //copy N, epsilon and cnt to the device //epsilon errCode=hipMemcpy( dev_epsilon, epsilon, sizeof(double), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; //2 means not enough memory } //cnt // errCode=hipMemcpy( dev_cnt, cnt, sizeof(unsigned int), hipMemcpyHostToDevice ); // if(errCode != hipSuccess) { // cout << "\nError: cnt Got error with code " << errCode << endl; //2 means not enough memory // } //N errCode=hipMemcpy( dev_N, N, sizeof(unsigned int), hipMemcpyHostToDevice ); if(errCode != hipSuccess) { cout << "\nError: N Got error with code " << errCode << endl; //2 means not enough 
memory
	}
	//printf("\nnumber of elements: %u,%u",*dev_N,N);
}

void allocateResultSet(struct structresults * dev_results, struct structresults * results)
{
	//dev_results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM);
	hipError_t errCode=hipMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM);
	if(errCode != hipSuccess) {
		cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory
	}

	printf("\nmemory requested for results (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024));

	//host result allocation:
	results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM);
}
*/

bool compResults(structresults const& lhs, structresults const& rhs)
{
	if (lhs.pointID != rhs.pointID)
		return (lhs.pointID < rhs.pointID);
	if (lhs.pointInDist != rhs.pointInDist)
	{
		return (lhs.pointInDist < rhs.pointInDist);
	}
	return (lhs.pointInDist > rhs.pointInDist);
}
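//Minimal standalone sketch of the result-buffer/batch estimation used by the two
//generateDistanceTableFromPreviousTableBatches* functions above. The helper name and its
//out-parameters are illustrative only (they are not part of the original interface), and it
//assumes ceil() from <math.h> is available, as included at the top of this file.
//The pi terms of the two epsilon-neighborhood areas cancel, so only the squared ratio of the
//epsilons remains; alpha is the overestimation factor (1.0 and 0.6 in the functions above).
static void estimateResultBufferAndBatches(unsigned int totalDirectNeighbors, double newEpsilon,
	double previousEpsilon, double alpha, unsigned int maxBufferSize, unsigned int numStreams,
	unsigned int * outBufferSize, int * outNumBatches)
{
	//ratio of the areas of the new and old epsilon neighborhoods
	double areaRatio=(newEpsilon*newEpsilon)/(previousEpsilon*previousEpsilon);

	//estimated total number of key/value result pairs, padded by the overestimation factor alpha
	unsigned int estimatedTotalSize=(unsigned int)(totalDirectNeighbors*areaRatio*(1.0+alpha));

	//for small datasets, shrink the per-stream buffers because pinned-memory allocation is expensive
	unsigned int bufferSize=maxBufferSize;
	if (estimatedTotalSize<(maxBufferSize*numStreams))
	{
		bufferSize=estimatedTotalSize/numStreams;
	}

	//safety guard (not in the original code): avoid a zero-sized buffer for tiny estimates
	if (bufferSize==0)
	{
		bufferSize=1;
	}

	*outBufferSize=bufferSize;
	*outNumBatches=(int)ceil((1.0*estimatedTotalSize)/(1.0*bufferSize));
}
//Example call mirroring the values used above (GPUBufferSize=100000000, GPUSTREAMS streams):
//estimateResultBufferAndBatches(*totalDirectNeighbors, *epsilon, *previousEpsilon, 1.0,
//	100000000, GPUSTREAMS, &GPUBufferSize, &numBatches);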
09049d3860d6db7ef83e6c6298a992bdc123bf84.cu
//The MIT License (MIT) //Copyright (c) 2016 Massachusetts Institute of Technology //Authors: Mike Gowanlock //This software has been created in projects supported by the US National //Science Foundation and NASA (PI: Pankratius, NSF ACI-1442997, NASA AIST-NNX15AG84G) //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: //The above copyright notice and this permission notice shall be included in //all copies or substantial portions of the Software. //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN //THE SOFTWARE. //precompute direct neighbors with the GPU: #include <cuda_runtime.h> #include <cuda.h> #include "structs.h" #include <stdio.h> #include "kernel.h" #include <math.h> #include "GPU.h" #include <algorithm> #include "omp.h" #include <queue> //thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/system/cuda/execution_policy.h> //for streams for thrust (added with Thrust v1.8) //elements for the result set //FOR A SINGLE KERNEL INVOCATION //NOT FOR THE BATCHED ONE #define BUFFERELEM 300000000 //400000000-original (when removing the data from the device before putting it back for the sort) //FOR THE BATCHED EXECUTION: //#define BATCHTOTALELEM 1200000000 //THE TOTAL SIZE ALLOCATED ON THE HOST //THE NUMBER OF BATCHES AND THE SIZE OF THE BUFFER FOR EACH KERNEL EXECUTION ARE NOT RELATED TO THE TOTAL NUMBER //OF ELEMENTS (ABOVE). 
#define NUMBATCHES 20 #define BATCHBUFFERELEM 100000000 //THE SMALLER SIZE ALLOCATED ON THE DEVICE FOR EACH KERNEL EXECUTION #define GPUSTREAMS 3 //number of concurrent gpu streams using namespace std; //Uses the grid index to compute the direct neighbor table //uses shared memory //each grid cell is processed by a block of threads //IN THIS ONE, WE PASS INTO THE GPU THE MAXIMUM AMOUNT OF SHARED MEMORY REQUIRED TO STORE THE OVERLAPPING //DATA ELEMENTS void makeDistanceTableGPUGridIndexWithSMBlockDataAware(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, int * lookupArr, struct table * neighborTable, int * totalNeighbors, unsigned int maxNumSMDataItems) { //CUDA error code: cudaError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); //pinned memory for the database: struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*N)); //dont use pinned memory for the database, its slower than using cudaMalloc //cudaMallocHost((void **) &database, sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != cudaSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=cudaMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// //test print the lookup array: // for (int i=0; i<*N; i++) // { // printf("\nlookup %d: %d",i, lookupArr[i]); // } int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*N)); //allocate memory on device: 
errCode=cudaMalloc( (void**)&dev_lookupArr, sizeof(int)*(*N)); if(errCode != cudaSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=cudaMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=cudaMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with 
code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // cudaMallocHost((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // cudaMallocHost((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //epsilon errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //N (DATASET SIZE) errCode=cudaMemcpy( dev_N, N, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //THE NUMBER OF THREADS //The number of threads is the blocksize * the number of grid cells //Therefore, each data item is not assigned to a single thread unsigned int * numgputhreads; numgputhreads=(unsigned int*)malloc(sizeof(unsigned int)); *numgputhreads=totalGridCells*BLOCKSIZE; unsigned int * dev_numThreads; dev_numThreads=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_numThreads, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //Number of threads errCode=cudaMemcpy( dev_numThreads, numgputhreads, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //THE AMOUNT OF SHARED MEMORY REQUIRED TO STORE THE 
OVERLAPPING POINTS OF A GIVEN ORIGIN CELL unsigned int * elemsSM; elemsSM=(unsigned int*)malloc(sizeof(unsigned int)); *elemsSM=maxNumSMDataItems; unsigned int * dev_elemsSM; dev_elemsSM=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_elemsSM, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_elemsSM Got error with code " << errCode << endl; } //SHARED MEMORY ELEMENTS errCode=cudaMemcpy( dev_elemsSM, elemsSM, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_elemsSM Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=cudaMemcpy( dev_debug1, debug1, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_debug2, debug2, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// //the total blocks is the number of grid cells const int TOTALBLOCKS=totalGridCells; printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //The third parameter in the kernel allocation is for dynamic shared memory. 
//We need shared memory for 3 arrays (2x doubles and 1x int) const int SIZE_SM=(2*(*elemsSM)*sizeof(double))+((*elemsSM)*sizeof(int)); printf("\nMemory requested for DYNAMIC shared memory (kb): %f",SIZE_SM/1024.0); kernelGridIndexSMBlockDataAware<<< TOTALBLOCKS, BLOCKSIZE, SIZE_SM >>>(dev_numThreads, dev_N, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt, dev_database, dev_elemsSM, dev_pointIDKey, dev_pointInDistValue); if ( cudaSuccess != cudaGetLastError() ){ printf( "\n\nERROR IN KERNEL LAUNCH!\nMIGHT BE TOO MUCH DYNAMIC SHARED MEMORY REQUESTED\n\n" ); } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",*cnt); } /* //copy the results, but only transfer the number of results, not the entire buffer errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting results from GPU Got error with code " << errCode << endl; } */ printf("\nIn block GPU method, Count is: %d",*cnt); *totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) errCode=cudaMemcpy(debug1, dev_debug1, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=cudaMemcpy(debug2, dev_debug2, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: cudaFree(dev_N); cudaFree(dev_numThreads); cudaFree(dev_database); cudaFree(dev_debug1); cudaFree(dev_debug2); cudaFree(dev_cnt); cudaFree(dev_epsilon); //cudaFree(dev_results); cudaFree(dev_grid); cudaFree(dev_lookupArr); cudaFree(dev_gridNumXCells); cudaFree(dev_gridNumYCells); cudaFree(dev_gridMin_x); cudaFree(dev_gridMin_y); //////////////////////////////////// //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> 
dev_data_ptr(dev_pointInDistValue); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //Uses the grid index to compute the direct neighbor table //uses shared memory //each grid cell is processed by a block of threads (set at compile time) void makeDistanceTableGPUGridIndexWithSMBlockDataOblivious(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, int * numNonEmptyCells, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct table * neighborTable, int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); //CUDA error code: cudaError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); //pinned memory for the database: struct point * database; //dont use pinned memory for the database, its slower than using cudaMalloc database=(struct point*)malloc(sizeof(struct point)*(*N)); //cudaMallocHost((void **) &database, sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != cudaSuccess) { 
cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=cudaMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// //test print the lookup array: // for (int i=0; i<*N; i++) // { // printf("\nlookup %d: %d",i, lookupArr[i]); // } int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_lookupArr, sizeof(int)*(*N)); if(errCode != cudaSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=cudaMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << 
"\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=cudaMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // cudaMallocHost((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // cudaMallocHost((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //epsilon errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //N (DATASET SIZE) errCode=cudaMemcpy( dev_N, N, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //THE NUMBER OF THREADS //The number of threads is the blocksize * the number of non-empty grid 
cells //Therefore, each data item is not assigned to a single thread unsigned int * numgputhreads; numgputhreads=(unsigned int*)malloc(sizeof(unsigned int)); *numgputhreads=(*numNonEmptyCells)*BLOCKSIZE; unsigned int * dev_numThreads; dev_numThreads=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_numThreads, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //Number of threads errCode=cudaMemcpy( dev_numThreads, numgputhreads, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_numThreads Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=cudaMemcpy( dev_debug1, debug1, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_debug2, debug2, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } //////////////////////////////////////////// //the schedule //an array that tells each block what grid id to process //that way we only request the number of blocks that correspond to the number of non-empty cells unsigned int * schedule; schedule=(unsigned int*)malloc(sizeof(unsigned int)*(*numNonEmptyCells)); int nonemptycnt=0; for (int i=0; i<totalGridCells; i++) { if (index[i].indexmin!=-1) { schedule[nonemptycnt]=i; nonemptycnt++; } } unsigned int * dev_schedule; dev_schedule=(unsigned int*)malloc(sizeof(unsigned int)*(*numNonEmptyCells)); //allocate on the device errCode=cudaMalloc((void**)&dev_schedule, sizeof(unsigned int)*(*numNonEmptyCells)); if(errCode != cudaSuccess) { cout << "\nError: dev_schedule Got error with code " << errCode << endl; } //copy the schedule errCode=cudaMemcpy( dev_schedule, schedule, sizeof(unsigned int)*(*numNonEmptyCells), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_schedule Got error with code " << errCode << endl; } //////////////////////////// //END THE SCHEDULE //////////////////////////// /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// //the total blocks is the number of grid cells const int TOTALBLOCKS=(*numNonEmptyCells); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel kernelGridIndexSMBlock<<< TOTALBLOCKS, BLOCKSIZE >>>(dev_numThreads, dev_N, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt, dev_database, 
dev_schedule, dev_pointIDKey, dev_pointInDistValue); if ( cudaSuccess != cudaGetLastError() ){ printf( "Error in kernel launch!\n" ); } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer /* errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting results from GPU Got error with code " << errCode << endl; } */ printf("\nIn block GPU method, Count is: %d",*cnt); *totalNeighbors=(*cnt); double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX errCode=cudaMemcpy(debug1, dev_debug1, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=cudaMemcpy(debug2, dev_debug2, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: cudaFree(dev_N); cudaFree(dev_database); cudaFree(dev_debug1); cudaFree(dev_debug2); cudaFree(dev_cnt); cudaFree(dev_epsilon); // cudaFree(dev_results); cudaFree(dev_grid); cudaFree(dev_lookupArr); cudaFree(dev_gridNumXCells); cudaFree(dev_gridNumYCells); cudaFree(dev_gridMin_x); cudaFree(dev_gridMin_y); cudaFree(dev_numThreads); cudaFree(dev_schedule); //////////////////////////////////// //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); 
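//Worked example (values are made up, for illustration only) of what the key/value sort above produces:
//before sort_by_key: pointIDKey       = {7, 2, 7, 2, 5}
//                    pointInDistValue = {2, 7, 5, 5, 7}
//after  sort_by_key: pointIDKey       = {2, 2, 5, 7, 7}
//                    pointInDistValue = {7, 5, 7, 2, 5}   (value order within a key may vary)
//i.e., all neighbors of a given point become contiguous, which is what
//constructNeighborTableKeyValue() below presumably relies on to build each point's neighbor list in one scan.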
thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //Uses the grid index to compute the direct neighbor table //NO SHARED MEMORY PAGING void makeDistanceTableGPUGridIndex(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct table * neighborTable, int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); //CUDA error code: cudaError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); struct point * database; //pinned memory for the database: database=(struct point*)malloc(sizeof(struct point)*(*N)); //dont use pinned memory for the database, its slower than using cudaMalloc //cudaMallocHost((void **) &database, sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != cudaSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=cudaMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE INDEX TO THE GPU 
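//Sketch of how the grid index copied above and the lookup array copied next relate; this is an
//assumption about how the (device-side) kernels walk a cell, shown here only as documentation.
//Row-major cell linearization and an inclusive indexmax are assumed.
//
// int cell = cellY * (*gridNumXCells) + cellX;     //linearized cell id
// if (index[cell].indexmin != -1)                  //-1 marks an empty cell (see the schedule construction above)
// {
// 	for (int k = index[cell].indexmin; k <= index[cell].indexmax; k++)
// 	{
// 		int pid = lookupArr[k];                 //id of a point stored in this cell
// 		//compare database[pid] against the candidate point using epsilon
// 	}
// }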
/////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// //test print the lookup array: // for (int i=0; i<*N; i++) // { // printf("\nlookup %d: %d",i, lookupArr[i]); // } int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_lookupArr, sizeof(int)*(*N)); if(errCode != cudaSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=cudaMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //ORIGINAL, TESTING PINNED MEMORY /* struct structresults * dev_results; struct structresults * results; 
errCode=cudaMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); //host result allocation: results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); */ //PINNED MEMORY FOR THE RESULT SET /* struct structresults * dev_results; struct structresults * results; errCode=cudaMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); //host result allocation: //results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); //pinned result set memory for the host cudaMallocHost((void **) &results, sizeof(struct structresults)*BUFFERELEM); */ //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=cudaMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // cudaMallocHost((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // cudaMallocHost((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? 
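//For reference, a sketch (an assumption about kernelGridIndex, which is defined elsewhere) of how a
//device thread typically appends one result pair to these key/value buffers using the dev_cnt counter:
//
// unsigned int idx = atomicAdd(cnt, (unsigned int)1);   //reserve one slot in the result buffers
// pointIDKey[idx]       = pid;                          //the point being processed
// pointInDistValue[idx] = candidateId;                  //a point found within epsilon of pid
//
//BUFFERELEM therefore has to be at least as large as the final *cnt, or the kernel writes past the buffers.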
pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //copy to device errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //copy N to device errCode=cudaMemcpy( dev_N, N, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } printf("\n\nMODIFIED THIS FUNCTION TO ADD THE OFFSET FOR BATCHING AND THE BATCH NUMBER\n\n"); printf("\nWITH A SINGLE BATCH-- THE BATCH OFFSET IS SET TO 1 AND THE BATCH NUMBER IS SET TO 0."); //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof( unsigned int )); *batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_offset, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch *batchOffset=1; errCode=cudaMemcpy( dev_offset, batchOffset, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)); *batchNumber=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)); //allocate on the device errCode=cudaMalloc((void**)&dev_batchNumber, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_batchNumber, batchNumber, sizeof(unsigned 
int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=cudaMemcpy( dev_debug1, debug1, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_debug2, debug2, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel kernelGridIndex<<< TOTALBLOCKS, BLOCKSIZE >>>(dev_N, dev_offset, dev_batchNumber, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt, dev_database, dev_pointIDKey, dev_pointInDistValue); // errCode=cudaDeviceSynchronize(); // cout <<"\n\nError from device synchronize: "<<errCode; cout <<"\n\nKERNEL LAUNCH RETURN: "<<cudaGetLastError()<<endl<<endl; if ( cudaSuccess != cudaGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<cudaSuccess<<endl<<endl; } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //dont get the result set because we leave it on the device for sorting //without transfering back to the host //first find the size of the number of results errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer /* errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting results from GPU Got error with code " << errCode << endl; } */ *totalNeighbors=(*cnt); double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) // unsigned int * debug1; // debug1=(unsigned int*)malloc(sizeof(unsigned int)); // *debug1=0; // unsigned int * debug2; // debug2=(unsigned int*)malloc(sizeof(unsigned int)); // *debug2=0; double tStartdebug=omp_get_wtime(); errCode=cudaMemcpy(debug1, dev_debug1, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=cudaMemcpy(debug2, dev_debug2, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } double tEnddebug=omp_get_wtime(); printf("\nTime to retrieve debug values: %f", tEnddebug - tStartdebug); /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: double tFreeStart=omp_get_wtime(); cudaFree(dev_N); cudaFree(dev_database); cudaFree(dev_debug1); cudaFree(dev_debug2); cudaFree(dev_cnt); cudaFree(dev_epsilon); //cudaFree(dev_results); cudaFree(dev_grid); cudaFree(dev_lookupArr); cudaFree(dev_gridNumXCells); cudaFree(dev_gridNumYCells); cudaFree(dev_gridMin_x); cudaFree(dev_gridMin_y); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); //////////////////////////////////// //cudaDeviceSynchronize(); //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust 
functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); // allocate space for the output //thrust::device_vector<int> sortedKeys(*cnt); //thrust::device_vector<int> sortedVals(*cnt); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); /* double tstartsort=omp_get_wtime(); //make a host vector initialized with the results that have been transfered from the GPU double sort_test1_start=omp_get_wtime(); //TESTING thrust::host_vector<structresults> hVectResults(results,results+(*cnt)); double sort_test1_end=omp_get_wtime(); //TESTING printf("\n Time to create the host vector: %f", sort_test1_end - sort_test1_start); //TESTING // for (int i=0;i<numResults;i++) // { // printf("\n host vector: %d, %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } // for (int i=0; i<numResults; i++) // { // structresults tmp; // tmp.pointID=0; // tmp.pointInDist=0; // hVectResults.push_back(tmp); // } //Now transfer the hostvector to the device: double sort_test2_start=omp_get_wtime(); //TESTING thrust::device_vector<structresults> dVectResults=hVectResults; double sort_test2_end=omp_get_wtime(); //TESTING printf("\n Time to create the device vector: %f", sort_test2_end - sort_test2_start); //TESTING //sort the device vector on the GPU try{ thrust::sort(dVectResults.begin(), dVectResults.end(),compareThrust()); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } // transfer the sorted results back to host thrust::copy(dVectResults.begin(), dVectResults.end(), hVectResults.begin()); double tendsort=omp_get_wtime(); printf("\nTime to sort on the GPU (grid index): %f",tendsort-tstartsort); */ //print GPU: // for (int i=0; i<(*cnt);i++) // { // printf("\nGPU elem: %d, data: %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //In this function we batch the results off of the GPU to accomodate larger epsilon values //The results that come from the GPU are in the form of key/value pairs (in two arrays) //Key-a point, Value-a point within epsilon of the key //The batches are mapped to differing streams //Each batch requires its own memory space for the result set //So the number of buffers on the GPU for the results is the number of streams (GPUSTREAMS) //On the host, we use the same size buffers, and number of them, and then build part of the neighbor table with the batch //This is an alternative to making one large array from all of the batches, which would require a large //pinned cuda malloc which is very expensive. 
It also allows for multiple threads to concurrently build the //neighbor table and interleave GPU work with work on the CPU //Also, the number of batches is estimated by calling a kernel that samples the number of neighbours (1%) and then //estimates the total neighbors, which is used to calculate the total number of batches //To make sure each batch doesn't vary much, we use a strided scheme for each batch //Uses the grid index to compute the direct neighbor table //NO SHARED MEMORY PAGING void makeDistanceTableGPUGridIndexBatches(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct table * neighborTable, unsigned int * totalNeighbors) { //testing new neighbortable: struct neighborTableLookup * newNeighborTable; newNeighborTable=new neighborTableLookup[dataPoints->size()]; double tKernelResultsStart=omp_get_wtime(); //CUDA error code: cudaError_t errCode; cout<<"\n** last error start of fn: "<<cudaGetLastError(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); struct point * database; //pinned memory for the database: database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //dont use pinned memory for the database, its slower than using cudaMalloc //cudaMallocHost((void **) &database, sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); printf("\n !!in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database //we do this because the data points struct may contain other values than x and y for (int i=0; i<(*DBSIZE); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// // //test print the index // for (int i=0; i<(*gridNumXCells)*(*gridNumYCells); i++) // { // printf("\nCell %d: min: %d, max: %d", i, index[i].indexmin, index[i].indexmax); // } int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != cudaSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=cudaMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } printf("\nSize of index sent to 
GPU (GiB): %f", (double)sizeof(struct grid)*totalGridCells/(1024.0*1024.0*1024.0)); /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_lookupArr, sizeof(int)*(*DBSIZE)); if(errCode != cudaSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=cudaMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //total size of the result set as it's batched //this isnt 
sent to the GPU unsigned int * totalResultSetCnt; totalResultSetCnt=(unsigned int*)malloc(sizeof(unsigned int)); *totalResultSetCnt=0; //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); // if(errCode != cudaSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //copy to device errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { 
cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=cudaMemcpy( dev_debug1, debug1, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_debug2, debug2, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } ////////////////////////////////////////////////////////// //ESTIMATE THE BUFFER SIZE AND NUMBER OF BATCHES ETC BY COUNTING THE NUMBER OF RESULTS //TAKE A SAMPLE OF THE DATA POINTS, NOT ALL OF THEM //Use sampleRate for this ///////////////////////////////////////////////////////// printf("\n\n***********************************\nEstimating Batches:"); //Parameters for the batch size estimation. double sampleRate=0.01; //sample 1% of the points in the dataset sampleRate=0.01. //Sample the entire dataset(no sampling) sampleRate=1 int offsetRate=1.0/sampleRate; printf("\nOffset: %d", offsetRate); ///////////////// //N-threads //////////////// double tstartbatchest=omp_get_wtime(); unsigned int * dev_N_batchEst; dev_N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * N_batchEst; N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *N_batchEst=*DBSIZE*sampleRate; //allocate on the device errCode=cudaMalloc((void**)&dev_N_batchEst, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_N_batchEst Got error with code " << errCode << endl; } //copy N to device //N IS THE NUMBER OF THREADS errCode=cudaMemcpy( dev_N_batchEst, N_batchEst, sizeof(unsigned int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: N batchEST Got error with code " << errCode << endl; } ///////////// //count the result set size //////////// unsigned int * dev_cnt_batchEst; dev_cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * cnt_batchEst; cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *cnt_batchEst=0; //allocate on the device errCode=cudaMalloc((void**)&dev_cnt_batchEst, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } //copy cnt to device errCode=cudaMemcpy( dev_cnt_batchEst, cnt_batchEst, sizeof(unsigned int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } ////////////////// //SAMPLE OFFSET - TO SAMPLE THE DATA TO ESTIMATE THE TOTAL NUMBER OF KEY VALUE PAIRS ///////////////// //offset into the database when batching the results unsigned int * sampleOffset; sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); *sampleOffset=offsetRate; unsigned int * dev_sampleOffset; dev_sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); //allocate on the device errCode=cudaMalloc((void**)&dev_sampleOffset, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: sample offset Got error with code " << errCode << endl; } //copy offset to device errCode=cudaMemcpy( dev_sampleOffset, sampleOffset, sizeof(unsigned int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } const int TOTALBLOCKSBATCHEST=ceil((1.0*(*DBSIZE)*sampleRate)/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKSBATCHEST); kernelGridIndexBatchEstimator<<< TOTALBLOCKSBATCHEST, BLOCKSIZE>>>(dev_N_batchEst, dev_sampleOffset, 
dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt_batchEst, dev_database); cout<<"\n** ERROR FROM KERNEL LAUNCH OF BATCH ESTIMATOR: "<<cudaGetLastError(); // find the size of the number of results errCode=cudaMemcpy( cnt_batchEst, dev_cnt_batchEst, sizeof(unsigned int), cudaMemcpyDeviceToHost); if(errCode != cudaSuccess) { cout << "\nError: getting cnt for batch estimate from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size for estimating the number of batches (sampled): %u",*cnt_batchEst); } cudaFree(dev_cnt_batchEst); cudaFree(dev_N_batchEst); cudaFree(dev_sampleOffset); double tendbatchest=omp_get_wtime(); printf("\nTime to get the total result set size from batch estimator: %f",tendbatchest-tstartbatchest); //WE CALCULATE THE BUFFER SIZES AND NUMBER OF BATCHES unsigned int GPUBufferSize=100000000; double alpha=0.05; //overestimation factor unsigned long long estimatedTotalSize=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate; unsigned long long estimatedTotalSizeWithAlpha=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate*(1.0+(alpha)); printf("\nEstimated total result set size: %llu", estimatedTotalSize); printf("\nEstimated total result set size (with Alpha %f): %llu", alpha,estimatedTotalSizeWithAlpha); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize*(1.0+(alpha*2.0))/(GPUSTREAMS); //we do 2*alpha for small datasets because the //sampling will be worse for small datasets //but we fix the 3 streams still (thats why divide by 3). } unsigned int numBatches=ceil(((1.0+alpha)*estimatedTotalSize*1.0)/(GPUBufferSize*1.0)); printf("\nNumber of batches: %d, buffer size: %d", numBatches, GPUBufferSize); printf("\nEnd Batch Estimator\n***********************************\n"); ///////////////////////////////////////////////////////// //END BATCH ESTIMATOR ///////////////////////////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING THE BATCH ESTIMATOR /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED 
RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { cudaMallocHost((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); cudaMallocHost((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // cudaMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // cudaMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// cudaStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //cudaStreamCreate(&stream[i]); cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
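	//NOTE: the host result buffers here use cudaMallocHost (pinned memory) because
	//cudaMemcpyAsync only overlaps with kernel execution when the host buffer is page-locked.
	//One OpenMP thread is used per CUDA stream (omp_set_num_threads(GPUSTREAMS)), so each
	//thread drives its own non-blocking stream: it launches a batch, sorts and copies that
	//batch's results back, and builds its part of the neighbor table while the other streams
	//keep the GPU busy.
	//The strided batching below sets batchOffset to numBatches; presumably the kernel maps
	//thread j of batch b to point b + j*numBatches so the batch sizes stay balanced
	//(the kernel body is not shown here, so that mapping is an assumption).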
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) num_threads(GPUSTREAMS) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\ntid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=cudaMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=cudaMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=cudaMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=cudaMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool kernelGridIndex<<< TOTALBLOCKS, BLOCKSIZE, 0, stream[tid] >>>(&dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, &dev_cnt[tid], dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); // errCode=cudaDeviceSynchronize(); // cout <<"\n\nError from device synchronize: "<<errCode; cout <<"\n\nKERNEL LAUNCH RETURN: "<<cudaGetLastError()<<endl<<endl; if ( cudaSuccess != cudaGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<cudaSuccess<<endl<<endl; } // find the size of the number of results errCode=cudaMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::cuda::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //#pragma omp critical //{ //copy the sorted arays back to the host //copy to the appropriate place in the larger host arrays //original thrust copy (doesnt have streams) //thrust::copy(dev_keys_ptr,dev_keys_ptr+cnt[tid],pointIDKey+(*totalResultSetCnt)); //thrust::copy(dev_data_ptr,dev_data_ptr+cnt[tid],pointInDistValue+(*totalResultSetCnt)); //thrust with streams (but into one big buffer) //copy the data back using the streams //cudaMemcpyAsync(thrust::raw_pointer_cast(pointIDKey+(*totalResultSetCnt)), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //cudaMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue+(*totalResultSetCnt)), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //thrust with streams into individual buffers for each batch //cudaMemcpyAsync(thrust::raw_pointer_cast(batchedResultSet[i].pointIDKey), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //cudaMemcpyAsync(thrust::raw_pointer_cast(batchedResultSet[i].pointInDistValue), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //thrust with streams (but into one big buffer) where each batch can write to a different spot in the big buffer //as the big array is chunked into the gpu batch size //copy the data back using the streams //FOR PINNED MEMORY //cudaMemcpyAsync(thrust::raw_pointer_cast(pointIDKey+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //cudaMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //thrust with streams (but into one big buffer) where each batch can write to a different spot in the big buffer //as the big array is chunked into the gpu batch size //FOR PAGED MEMORY -- cant use streams // cudaMemcpy(thrust::raw_pointer_cast(pointIDKey+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost); // 
cudaMemcpy(thrust::raw_pointer_cast(pointInDistValue+(i*GPUBufferSize)), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost); //thrust with streams into individual buffers for each batch cudaMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); cudaMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table cudaStreamSynchronize(stream[tid]); double tableconstuctstart=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey[tid], pointInDistValue[tid], neighborTable, &cnt[tid]); double tableconstuctend=omp_get_wtime(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %u", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) // unsigned int * debug1; // debug1=(unsigned int*)malloc(sizeof(unsigned int)); // *debug1=0; // unsigned int * debug2; // debug2=(unsigned int*)malloc(sizeof(unsigned int)); // *debug2=0; double tStartdebug=omp_get_wtime(); errCode=cudaMemcpy(debug1, dev_debug1, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=cudaMemcpy(debug2, dev_debug2, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } double tEnddebug=omp_get_wtime(); printf("\nTime to retrieve debug values: %f", tEnddebug - tStartdebug); /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: double tFreeStart=omp_get_wtime(); //destroy streams // for (int i=0; i<GPUSTREAMS; i++) // { // cudaStreamDestroy(stream[i]); // } for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaStreamDestroy(stream[i]); if(errCode != cudaSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); cudaFree(dev_database); cudaFree(dev_debug1); cudaFree(dev_debug2); cudaFree(dev_epsilon); cudaFree(dev_grid); cudaFree(dev_lookupArr); cudaFree(dev_gridNumXCells); cudaFree(dev_gridNumYCells); cudaFree(dev_gridMin_x); cudaFree(dev_gridMin_y); cudaFree(dev_N); cudaFree(dev_cnt); cudaFree(dev_offset); cudaFree(dev_batchNumber); //free data related to the individual streams for each batch for (int 
i=0; i<GPUSTREAMS; i++) { //free the data on the device cudaFree(dev_pointIDKey[i]); cudaFree(dev_pointInDistValue[i]); //free on the host cudaFreeHost(pointIDKey[i]); cudaFreeHost(pointInDistValue[i]); // errCode=cudaMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); // if(errCode != cudaSuccess) { // cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory // } // errCode=cudaMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); // if(errCode != cudaSuccess) { // cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory // } } cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //free pinned memory on host cudaFreeHost(pointIDKey); cudaFreeHost(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); // printf("\nreturning before constructing table (which is commented)"); // return; //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// //NOW CONSTRUCT THE TABLE PARTIALLY WHEN MAKING THE BATCHES // double tStartTableConstruct=omp_get_wtime(); // constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, totalResultSetCnt); // double tEndTableConstruct=omp_get_wtime(); // printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //print table: /* int tmpcnt=0; printf("\nGrid GPU Table**********"); for (int i=0; i<(*DBSIZE); i++) { printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); //sort so it has the same output: std::sort(neighborTable[i].neighbors.begin(),neighborTable[i].neighbors.end()); for (int j=0; j<neighborTable[i].neighbors.size();j++) { printf("%d, ",neighborTable[i].neighbors[j]); tmpcnt++; } } printf("\n count elems: %d", tmpcnt); */ //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// //printf("\ntotal neighbors (in batched fn): %d", *totalNeighbors); cout<<"\n** last error at end of fn batches: "<<cudaGetLastError(); // printf("\nExiting function early.."); // return; } //In this function we batch the results off of the GPU to accomodate larger epsilon values //The results that come from the GPU are in the form of key/value pairs (in two arrays) //Key-a point, Value-a point within epsilon of the key //The batches are mapped to differing streams //Each batch requires its own memory space for the result set //So the number of buffers on the GPU for the results is the number of streams (GPUSTREAMS) //On the host, we use the same size buffers, and number of them, and then build part of the neighbor table with the batch //This is an alternative to making one large array from all of the batches, which would require a large //pinned cuda malloc which is very expensive. 
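	//NOTE: the cudaFree(dev_pointIDKey)/cudaFree(dev_pointInDistValue) and
	//cudaFreeHost(pointIDKey)/cudaFreeHost(pointInDistValue) calls that pass the arrays
	//themselves (rather than an indexed element) appear to be redundant: those names are
	//host-side arrays of per-stream pointers, so these calls should simply return an error
	//and free nothing. The per-stream frees inside the loop above are the ones that
	//actually release the result buffers.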
It also allows for multiple threads to concurrently build the //neighbor table and interleave GPU work with work on the CPU //Also, the number of batches is estimated by calling a kernel that samples the number of neighbours (1%) and then //estimates the total neighbors, which is used to calculate the total number of batches //To make sure each batch doesn't vary much, we use a strided scheme for each batch //Uses the grid index to compute the direct neighbor table //NO SHARED MEMORY PAGING void makeDistanceTableGPUGridIndexBatchesAlternateTest(std::vector<struct dataElem> * dataPoints, double * epsilon, struct grid * index, double * gridMin_x, double * gridMin_y, int * gridNumXCells, int * gridNumYCells, unsigned int * lookupArr, struct neighborTableLookup * neighborTable, std::vector<struct neighborDataPtrs> * pointersToNeighbors, unsigned int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); //CUDA error code: cudaError_t errCode; cout<<"\n** last error start of fn: "<<cudaGetLastError(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); struct point * database; //pinned memory for the database: database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //dont use pinned memory for the database, its slower than using cudaMalloc //cudaMallocHost((void **) &database, sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); printf("\n !!in main GPU method: DBSIZE is: %u",*DBSIZE);cout.flush(); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database //we do this because the data points struct may contain other values than x and y for (int i=0; i<(*DBSIZE); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY THE INDEX TO THE GPU /////////////////////////////////// int totalGridCells=(*gridNumXCells)*(*gridNumYCells); struct grid * dev_grid; dev_grid=(struct grid*)malloc(sizeof(struct grid)*totalGridCells); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_grid, sizeof(struct grid)*totalGridCells); if(errCode != cudaSuccess) { cout << "\nError: grid index Got error with code " << errCode << endl; cout.flush(); } //copy grid index to the device: errCode=cudaMemcpy(dev_grid, index, sizeof(struct grid)*totalGridCells, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: grid index allocation Got error with code " << errCode << endl; } printf("\nSize of index sent to GPU (GiB): %f", (double)sizeof(struct grid)*totalGridCells/(1024.0*1024.0*1024.0)); /////////////////////////////////// //END COPY THE INDEX TO THE GPU /////////////////////////////////// /////////////////////////////////// 
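	//NOTE: a recurring pattern in this function (e.g. dev_database, dev_grid): the device
	//pointer is first assigned a host malloc() block and then immediately overwritten by
	//cudaMalloc(). The malloc'd host block appears to serve no purpose and is never freed;
	//declaring the pointer and calling cudaMalloc() directly would suffice.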
//COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// int * dev_lookupArr; dev_lookupArr=(int*)malloc(sizeof(int)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_lookupArr, sizeof(int)*(*DBSIZE)); if(errCode != cudaSuccess) { cout << "\nError: lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=cudaMemcpy(dev_lookupArr, lookupArr, sizeof(int)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array allocation Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE LOOKUP ARRAY TO THE GPU /////////////////////////////////// /////////////////////////////////// //COPY GRID DIMENSIONS TO THE GPU //THIS INCLUDES THE NUMBER OF CELLS IN EACH X AND Y DIMENSION, //AND THE STARTING POINT IN THE X AND Y DIMENSIONS THAT THE GRID STARTS AT /////////////////////////////////// //The minimum x boundary of the grid: //gridMin_x double * dev_gridMin_x; dev_gridMin_x=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_x, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_x, gridMin_x, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_x Got error with code " << errCode << endl; } //The minimum y boundary of the grid: //gridMin_y double * dev_gridMin_y; dev_gridMin_y=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_gridMin_y, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridMin_y, gridMin_y, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: gridMin_y Got error with code " << errCode << endl; } //The number of cells in the x dimension: //gridNumXCells int * dev_gridNumXCells; dev_gridNumXCells=(int*)malloc(sizeof(int)); *dev_gridNumXCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumXCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumXCells, gridNumXCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumXCells memcpy Got error with code " << errCode << endl; } //The number of cells in the y dimension: //gridNumYCells int * dev_gridNumYCells; dev_gridNumYCells=(int*)malloc(sizeof(int)); *dev_gridNumYCells=0; //allocate on the device errCode=cudaMalloc((int**)&dev_gridNumYCells, sizeof(int)); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_gridNumYCells, gridNumYCells, sizeof(int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_gridNumYCells memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY GRID DIMENSIONS TO THE GPU /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //total size of the result set as it's batched //this isnt sent to the GPU unsigned int * totalResultSetCnt; totalResultSetCnt=(unsigned int*)malloc(sizeof(unsigned int)); *totalResultSetCnt=0; //count values - for an individual kernel launch //need different count values for each 
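	//SKETCH (hypothetical helper, not used anywhere in this code): the scalar parameters in
	//this function (epsilon, gridMin_x, gridMin_y, gridNumXCells, gridNumYCells, ...) are each
	//set up with the same allocate-then-copy steps; a small template could factor that out:
	//
	//  template <typename T>
	//  cudaError_t allocAndCopyToDevice(T ** devPtr, const T * hostPtr)
	//  {
	//      cudaError_t err = cudaMalloc((void**)devPtr, sizeof(T));
	//      if (err != cudaSuccess) return err;
	//      return cudaMemcpy(*devPtr, hostPtr, sizeof(T), cudaMemcpyHostToDevice);
	//  }
	//
	//  //usage: double * dev_gridMin_x; allocAndCopyToDevice(&dev_gridMin_x, gridMin_x);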
stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); // if(errCode != cudaSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //copy to device errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; unsigned int * dev_debug2; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; unsigned int * debug1; debug1=(unsigned int *)malloc(sizeof(unsigned int )); *debug1=0; unsigned int * debug2; debug2=(unsigned int *)malloc(sizeof(unsigned int )); *debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //set to 0 //copy debug to device errCode=cudaMemcpy( dev_debug1, debug1, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout 
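	//NOTE: N, cnt, batchOffset and batchNumber (and their dev_ counterparts) are sized with
	//GPUSTREAMS entries so that each OpenMP thread / CUDA stream gets its own slot; without
	//per-stream copies, concurrently running batches would overwrite each other's kernel
	//parameters and result counters.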
<< "\nError: dev_debug1 Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_debug2, debug2, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_debug2 Got error with code " << errCode << endl; } ////////////////////////////////////////////////////////// //ESTIMATE THE BUFFER SIZE AND NUMBER OF BATCHES ETC BY COUNTING THE NUMBER OF RESULTS //TAKE A SAMPLE OF THE DATA POINTS, NOT ALL OF THEM //Use sampleRate for this ///////////////////////////////////////////////////////// printf("\n\n***********************************\nEstimating Batches:"); //Parameters for the batch size estimation. double sampleRate=0.01; //sample 1% of the points in the dataset sampleRate=0.01. //Sample the entire dataset(no sampling) sampleRate=1 int offsetRate=1.0/sampleRate; printf("\nOffset: %d", offsetRate); ///////////////// //N-threads //////////////// double tstartbatchest=omp_get_wtime(); unsigned int * dev_N_batchEst; dev_N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * N_batchEst; N_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *N_batchEst=*DBSIZE*sampleRate; //allocate on the device errCode=cudaMalloc((void**)&dev_N_batchEst, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_N_batchEst Got error with code " << errCode << endl; } //copy N to device //N IS THE NUMBER OF THREADS errCode=cudaMemcpy( dev_N_batchEst, N_batchEst, sizeof(unsigned int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: N batchEST Got error with code " << errCode << endl; } ///////////// //count the result set size //////////// unsigned int * dev_cnt_batchEst; dev_cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); unsigned int * cnt_batchEst; cnt_batchEst=(unsigned int*)malloc(sizeof(unsigned int)); *cnt_batchEst=0; //allocate on the device errCode=cudaMalloc((void**)&dev_cnt_batchEst, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } //copy cnt to device errCode=cudaMemcpy( dev_cnt_batchEst, cnt_batchEst, sizeof(unsigned int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } ////////////////// //SAMPLE OFFSET - TO SAMPLE THE DATA TO ESTIMATE THE TOTAL NUMBER OF KEY VALUE PAIRS ///////////////// //offset into the database when batching the results unsigned int * sampleOffset; sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); *sampleOffset=offsetRate; unsigned int * dev_sampleOffset; dev_sampleOffset=(unsigned int*)malloc(sizeof(unsigned int)); //allocate on the device errCode=cudaMalloc((void**)&dev_sampleOffset, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: sample offset Got error with code " << errCode << endl; } //copy offset to device errCode=cudaMemcpy( dev_sampleOffset, sampleOffset, sizeof(unsigned int), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt_batchEst Got error with code " << errCode << endl; } const int TOTALBLOCKSBATCHEST=ceil((1.0*(*DBSIZE)*sampleRate)/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKSBATCHEST); kernelGridIndexBatchEstimator<<< TOTALBLOCKSBATCHEST, BLOCKSIZE>>>(dev_N_batchEst, dev_sampleOffset, dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, dev_cnt_batchEst, dev_database); cout<<"\n** ERROR FROM KERNEL LAUNCH OF BATCH ESTIMATOR: 
"<<cudaGetLastError(); // find the size of the number of results errCode=cudaMemcpy( cnt_batchEst, dev_cnt_batchEst, sizeof(unsigned int), cudaMemcpyDeviceToHost); if(errCode != cudaSuccess) { cout << "\nError: getting cnt for batch estimate from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size for estimating the number of batches (sampled): %u",*cnt_batchEst); } cudaFree(dev_cnt_batchEst); cudaFree(dev_N_batchEst); cudaFree(dev_sampleOffset); double tendbatchest=omp_get_wtime(); printf("\nTime to get the total result set size from batch estimator: %f",tendbatchest-tstartbatchest); //WE CALCULATE THE BUFFER SIZES AND NUMBER OF BATCHES unsigned int GPUBufferSize=100000000; double alpha=0.05; //overestimation factor unsigned long long estimatedTotalSize=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate; unsigned long long estimatedTotalSizeWithAlpha=(unsigned long long)(*cnt_batchEst)*(unsigned long long)offsetRate*(1.0+(alpha)); printf("\nEstimated total result set size: %llu", estimatedTotalSize); printf("\nEstimated total result set size (with Alpha %f): %llu", alpha,estimatedTotalSizeWithAlpha); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize*(1.0+(alpha*2.0))/(GPUSTREAMS); //we do 2*alpha for small datasets because the //sampling will be worse for small datasets //but we fix the 3 streams still (thats why divide by 3). } unsigned int numBatches=ceil(((1.0+alpha)*estimatedTotalSize*1.0)/(GPUBufferSize*1.0)); printf("\nNumber of batches: %d, buffer size: %d", numBatches, GPUBufferSize); printf("\nEnd Batch Estimator\n***********************************\n"); ///////////////////////////////////////////////////////// //END BATCH ESTIMATOR ///////////////////////////////////////////////////////// /////////////////// //ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// //THE NUMBER OF POINTERS IS EQUAL TO THE NUMBER OF BATCHES for (int i=0; i<numBatches; i++) { int *ptr; struct neighborDataPtrs tmpStruct; tmpStruct.dataPtr=ptr; tmpStruct.sizeOfDataArr=0; pointersToNeighbors->push_back(tmpStruct); } /////////////////// //END ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING THE BATCH ESTIMATOR /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED 
MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { cudaMallocHost((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); cudaMallocHost((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // cudaMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // cudaMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// cudaStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //cudaStreamCreate(&stream[i]); cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) num_threads(GPUSTREAMS) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\ntid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=cudaMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=cudaMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=cudaMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=cudaMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool kernelGridIndex<<< TOTALBLOCKS, BLOCKSIZE, 0, stream[tid] >>>(&dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_debug1, dev_debug2, dev_epsilon, dev_grid, dev_gridMin_x, dev_gridMin_y, dev_gridNumXCells, dev_gridNumYCells, dev_lookupArr, &dev_cnt[tid], dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); // errCode=cudaDeviceSynchronize(); // cout <<"\n\nError from device synchronize: "<<errCode; cout <<"\n\nKERNEL LAUNCH RETURN: "<<cudaGetLastError()<<endl<<endl; if ( cudaSuccess != cudaGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<cudaSuccess<<endl<<endl; } // find the size of the number of results errCode=cudaMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size within epsilon (GPU grid): %d",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::cuda::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //thrust with streams into individual buffers for each batch cudaMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); cudaMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table cudaStreamSynchronize(stream[tid]); double tableconstuctstart=omp_get_wtime(); //set the number of neighbors in the pointer struct: (*pointersToNeighbors)[i].sizeOfDataArr=cnt[tid]; (*pointersToNeighbors)[i].dataPtr=new int[cnt[tid]]; constructNeighborTableKeyValueAlternateTest(pointIDKey[tid], pointInDistValue[tid], neighborTable, (*pointersToNeighbors)[i].dataPtr, &cnt[tid]); //cout <<"\nIn make neighbortable. 
Data array ptr: "<<(*pointersToNeighbors)[i].dataPtr<<" , size of data array: "<<(*pointersToNeighbors)[i].sizeOfDataArr;cout.flush(); double tableconstuctend=omp_get_wtime(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %u", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) // unsigned int * debug1; // debug1=(unsigned int*)malloc(sizeof(unsigned int)); // *debug1=0; // unsigned int * debug2; // debug2=(unsigned int*)malloc(sizeof(unsigned int)); // *debug2=0; double tStartdebug=omp_get_wtime(); errCode=cudaMemcpy(debug1, dev_debug1, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=cudaMemcpy(debug2, dev_debug2, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } double tEnddebug=omp_get_wtime(); printf("\nTime to retrieve debug values: %f", tEnddebug - tStartdebug); /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: double tFreeStart=omp_get_wtime(); //destroy streams // for (int i=0; i<GPUSTREAMS; i++) // { // cudaStreamDestroy(stream[i]); // } for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaStreamDestroy(stream[i]); if(errCode != cudaSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); cudaFree(dev_database); cudaFree(dev_debug1); cudaFree(dev_debug2); cudaFree(dev_epsilon); cudaFree(dev_grid); cudaFree(dev_lookupArr); cudaFree(dev_gridNumXCells); cudaFree(dev_gridNumYCells); cudaFree(dev_gridMin_x); cudaFree(dev_gridMin_y); cudaFree(dev_N); cudaFree(dev_cnt); cudaFree(dev_offset); cudaFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free the data on the device cudaFree(dev_pointIDKey[i]); cudaFree(dev_pointInDistValue[i]); //free on the host cudaFreeHost(pointIDKey[i]); cudaFreeHost(pointInDistValue[i]); } cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //free pinned memory on host cudaFreeHost(pointIDKey); cudaFreeHost(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); //printf("\ntotal neighbors (in batched fn): %d", *totalNeighbors); cout<<"\n** last error at end of fn batches: "<<cudaGetLastError(); // printf("\nExiting function early.."); // return; } //void 
constructNeighborTableKeyValueAlternateTest(int * pointIDKey, int * pointInDistValue, struct neighborTableLookup * neighborTable, int * pointersToNeighbors, unsigned int * cnt); void constructNeighborTableKeyValueAlternateTest(int * pointIDKey, int * pointInDistValue, struct neighborTableLookup * neighborTable, int * pointersToNeighbors, unsigned int * cnt) { //need to take as input a pointer to an array of integers that has not been allocated yet (neighborTableData), 1 per batch (2D array) // int * ptrToData; // pointersToNeighbors.push_back() //allocate memory to the array that holds all of the direct neighbors: //pointersToNeighbors= new int[*cnt]; //record the size of the array // pointersToNeighbors->sizeOfDataArr=*cnt; //copy the value data: std::copy(pointInDistValue, pointInDistValue+(*cnt), pointersToNeighbors); // printf("\nTest copy: "); // int sample=ceil((*cnt)*0.001); // for (int i=0; i<sample; i++) // { // printf("\nval: %d",pointersToNeighbors[i]); // } //Step 1: find all of the unique keys and their positions in the key array //double tstart=omp_get_wtime(); unsigned int numUniqueKeys=0; struct keyData{ int key; int position; }; std::vector<keyData> uniqueKeyData; keyData tmp; tmp.key=pointIDKey[0]; tmp.position=0; uniqueKeyData.push_back(tmp); //we assign the ith data item when iterating over i+1th data item, //so we go 1 loop iteration beyond the number (*cnt) for (int i=1; i<(*cnt)+1; i++) { if (pointIDKey[i-1]!=pointIDKey[i]) { numUniqueKeys++; tmp.key=pointIDKey[i]; tmp.position=i; uniqueKeyData.push_back(tmp); } } //insert into the neighbor table the values based on the positions of //the unique keys obtained above. for (int i=0; i<uniqueKeyData.size()-1; i++) { int keyElem=uniqueKeyData[i].key; neighborTable[keyElem].pointID=keyElem; neighborTable[keyElem].indexmin=uniqueKeyData[i].position; neighborTable[keyElem].indexmax=uniqueKeyData[i+1].position-1; //update the pointer to the data array for the values neighborTable[keyElem].dataPtr=pointersToNeighbors; } /* //newer multithreaded way: //Step 1: find all of the unique keys and their positions in the key array //double tstart=omp_get_wtime(); unsigned int numUniqueKeys=0; unsigned int count=0; struct keyData{ int key; int position; }; std::vector<keyData> uniqueKeyData; keyData tmp; tmp.key=pointIDKey[0]; tmp.position=0; uniqueKeyData.push_back(tmp); //we assign the ith data item when iterating over i+1th data item, //so we go 1 loop iteration beyond the number (*cnt) for (int i=1; i<(*cnt)+1; i++) { if (pointIDKey[i-1]!=pointIDKey[i]) { numUniqueKeys++; tmp.key=pointIDKey[i]; tmp.position=i; uniqueKeyData.push_back(tmp); } } //Step 2: In parallel, insert into the neighbor table the values based on the positions of //the unique keys obtained above. 
	Since multiple threads access this function, we don't want to oversubscribe the
	//machine with nested parallelism, so limit to 2 threads
	omp_set_nested(1);
	#pragma omp parallel for reduction(+:count) num_threads(2) schedule(static,1)
	for (int i=0; i<uniqueKeyData.size()-1; i++)
	{
		int keyElem=uniqueKeyData[i].key;
		int valStart=uniqueKeyData[i].position;
		int valEnd=uniqueKeyData[i+1].position-1;
		int size=valEnd-valStart+1;
		//printf("\nval: start:%d, end: %d", valStart,valEnd);
		neighborTable[keyElem].pointID=keyElem;
		neighborTable[keyElem].neighbors.insert(neighborTable[keyElem].neighbors.begin(),&pointInDistValue[valStart],&pointInDistValue[valStart+size]);
		//printf("\ni: %d, keyElem: %d, position start: %d, position end: %d, size: %d", i,keyElem,valStart, valEnd,size);
		count+=size;
	}
	*/

}

void constructNeighborTableKeyValue(int * pointIDKey, int * pointInDistValue, struct table * neighborTable, unsigned int * cnt)
{
	//newer multithreaded way:
	//Step 1: find all of the unique keys and their positions in the key array
	//double tstart=omp_get_wtime();

	unsigned int numUniqueKeys=0;
	unsigned int count=0;

	struct keyData{
		int key;
		int position;
	};

	std::vector<keyData> uniqueKeyData;

	keyData tmp;
	tmp.key=pointIDKey[0];
	tmp.position=0;
	uniqueKeyData.push_back(tmp);

	//we assign the ith data item when iterating over the (i+1)th data item,
	//so we go 1 loop iteration beyond the number (*cnt); the final iteration only
	//appends an end sentinel and no longer reads pointIDKey[*cnt], which is past the
	//end of the buffer
	for (int i=1; i<(*cnt)+1; i++)
	{
		if (i==(*cnt) || pointIDKey[i-1]!=pointIDKey[i])
		{
			numUniqueKeys++;
			tmp.key=(i==(*cnt)) ? -1 : pointIDKey[i]; //the sentinel key is never used, only its position
			tmp.position=i;
			uniqueKeyData.push_back(tmp);
		}
	}

	//Step 2: In parallel, insert into the neighbor table the values based on the positions of
	//the unique keys obtained above. Since multiple threads access this function, we don't want
	//to oversubscribe the machine with nested parallelism, so limit to 2 threads
	omp_set_nested(1);
	#pragma omp parallel for reduction(+:count) num_threads(2) schedule(static,1)
	for (int i=0; i<uniqueKeyData.size()-1; i++)
	{
		int keyElem=uniqueKeyData[i].key;
		int valStart=uniqueKeyData[i].position;
		int valEnd=uniqueKeyData[i+1].position-1;
		int size=valEnd-valStart+1;
		//printf("\nval: start:%d, end: %d", valStart,valEnd);
		neighborTable[keyElem].pointID=keyElem;
		neighborTable[keyElem].neighbors.insert(neighborTable[keyElem].neighbors.begin(),&pointInDistValue[valStart],&pointInDistValue[valStart+size]);
		//printf("\ni: %d, keyElem: %d, position start: %d, position end: %d, size: %d", i,keyElem,valStart, valEnd,size);
		count+=size;
	}

}

void constructNeighborTable(thrust::host_vector<structresults> * hVectResults, struct table * neighborTable, unsigned int * cnt)
{
	//original way:
	// for (unsigned int i=0; i<(*cnt); i++)
	// {
	// 	unsigned int elemID=hVectResults[i].pointID;
	// 	neighborTable[elemID].pointID=elemID;
	// 	neighborTable[elemID].neighbors.push_back(hVectResults[i].pointInDist);
	// }
	//end original way

	//new way: loop over and find the ranges of the different point ids
	//then make one insert into the vector
	unsigned int lastElemID=(*hVectResults)[0].pointID;
	unsigned int lastIndex=0;

	//we assign the ith data item when iterating over the (i+1)th data item,
	//so we go 1 loop iteration beyond the number (*cnt)
	//note: this relies on reading one element past the last valid result to flush the
	//final range
	for (unsigned int i=1; i<(*cnt)+1; i++)
	{
		if ((*hVectResults)[i].pointID!=lastElemID)
		{
			unsigned int rangemax=i-1;
			int tmpSize=rangemax-lastIndex+1;
			unsigned int tmp[tmpSize];
			for (int j=lastIndex; j<=rangemax; j++)
			{
				tmp[j-lastIndex]=(*hVectResults)[j].pointInDist;
			}
			neighborTable[lastElemID].pointID=lastElemID;
neighborTable[lastElemID].neighbors.insert(neighborTable[lastElemID].neighbors.begin(),tmp, tmp+tmpSize); //update the new last elem id lastElemID=(*hVectResults)[i].pointID; lastIndex=i; } } //print table: /* int tmpcnt=0; printf("\nGrid GPU Table**********"); for (int i=0; i<(*N); i++) { printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); for (int j=0; j<neighborTable[i].neighbors.size();j++) { printf("%d, ",neighborTable[i].neighbors[j]); tmpcnt++; } } printf("\n count elems: %d", tmpcnt); */ } //Uses a brute force kernel to calculate the direct neighbors of the points in the database void makeDistanceTableGPUBruteForce(std::vector<struct dataElem> * dataPoints, double * epsilon, struct table * neighborTable, int * totalNeighbors) { //CUDA error code: cudaError_t errCode; /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in main GPU method: N is: %u",*N);cout.flush(); struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); printf("\n !!in main GPU method: N is: %u",*N);cout.flush(); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// //NON-PINNED MEMORY FOR SINGLE KERNEL INVOCATION (NO BATCHING) //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=cudaMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // cudaMallocHost((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // cudaMallocHost((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? 
pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////////// //SET OTHER KERNEL PARAMETERS /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //Epsilon double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //*dev_epsilon=*epsilon; //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //size of the database: unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //debug values unsigned int * dev_debug1; unsigned int * dev_debug2; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; } //copy N, epsilon and cnt to the device //epsilon errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } //N (DATASET SIZE) errCode=cudaMemcpy( dev_N, N, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } /////////////////////////////////// //END SET OTHER KERNEL PARAMETERS /////////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL /////////////////////////////////// const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel kernelBruteForce<<< TOTALBLOCKS, BLOCKSIZE >>>(dev_N, dev_debug1, dev_debug2, dev_epsilon, dev_cnt, dev_database, dev_pointIDKey, dev_pointInDistValue); if ( cudaSuccess != cudaGetLastError() ){ printf( "Error in kernel launch!\n" ); } /////////////////////////////////// //END LAUNCH KERNEL /////////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << 
"\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on within epsilon: %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer // errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting results from GPU Got error with code " << errCode << endl; // } *totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX //get debug information (optional) unsigned int * debug1; debug1=(unsigned int*)malloc(sizeof(unsigned int)); *debug1=0; unsigned int * debug2; debug2=(unsigned int*)malloc(sizeof(unsigned int)); *debug2=0; errCode=cudaMemcpy(debug1, dev_debug1, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug1 value: %u",*debug1); } errCode=cudaMemcpy(debug2, dev_debug2, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting debug1 from GPU Got error with code " << errCode << endl; } else { printf("\nDebug2 value: %u",*debug2); } /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// /////////////////////////////////// //FREE MEMORY FROM THE GPU /////////////////////////////////// //free: cudaFree(dev_database); cudaFree(dev_debug1); cudaFree(dev_debug2); cudaFree(dev_cnt); cudaFree(dev_epsilon); //cudaFree(dev_results); //////////////////////////////////// //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //print table: // for (int i=0; i<(*N); i++) // { // printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); // for (int j=0; 
j<neighborTable[i].neighbors.size();j++) // { // printf("%d, ",neighborTable[i].neighbors[j]); // } // } //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //generates an array and lookup table for the GPU, from individual pointers to the neighbors from a previous table's results //This one uses the new implementation that doesn't use vectors //Input: //numPoints //inputNeighborTable //OLD: dataPtr - vector of pointers to the arrays containing the neighbors across the previous batches in the neighborTableLookup //std::vector<struct neighborDataPtrs> *dataPtr, //Outputs: //directNeighborArray -- the ids of the points within epsilon of the input table //gpuLookupArray -- points to the indices of the elements in directNeighborArray void generateNeighborArrayForGPUAlternative(unsigned int numPoints, struct neighborTableLookup * inputNeighborTable, int * directNeighborArray, struct gpulookuptable * gpuLookupArray) { //populate the direct neighboer array //and the lookup array at the same time //This is because the neighbors for each data point are stored across the various arrays allocated for each batch int startIndex=0; for (int i=0; i<numPoints; i++) { int indexmin=inputNeighborTable[i].indexmin; int indexmax=inputNeighborTable[i].indexmax; int * data= (inputNeighborTable[i].dataPtr)+indexmin; int sizeRange=indexmax-indexmin+1; //printf("\nIteration: %d, Start index: %d, sizeRange: %d",i,startIndex,sizeRange); //std::copy(data+indexmin, data+sizeRange, directNeighborArray+startIndex); std::copy(data, data+sizeRange, directNeighborArray+startIndex); gpuLookupArray[i].indexmin=startIndex; gpuLookupArray[i].indexmax=startIndex+sizeRange-1; startIndex+=sizeRange; } } //generates an array and lookup table for the GPU. This is because we can't use vectors on the GPU. 
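//Illustrative usage sketch only (hypothetical caller; the variable names below are assumptions, not part of this file):
//the flat arrays are sized from the vector-based table before the call.
//
//	unsigned int totalNeighborCount=0;
//	for (unsigned int i=0; i<numPoints; i++)
//	{
//		totalNeighborCount+=inputNeighborTable[i].neighbors.size();
//	}
//	int * directNeighborArray=(int*)malloc(sizeof(int)*totalNeighborCount);
//	struct gpulookuptable * gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*numPoints);
//	generateNeighborArrayForGPU(numPoints, inputNeighborTable, directNeighborArray, gpuLookupArray);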
void generateNeighborArrayForGPU(unsigned int numPoints, struct table * inputNeighborTable, int * directNeighborArray, struct gpulookuptable * gpuLookupArray) { int startIndex=0; unsigned int cnt=0; for (int i=0; i<numPoints; i++) { startIndex=cnt; for (int j=0; j<inputNeighborTable[i].neighbors.size(); j++) { directNeighborArray[cnt]=inputNeighborTable[i].neighbors[j]; cnt++; } gpuLookupArray[i].indexmin=startIndex; gpuLookupArray[i].indexmax=cnt-1; } } void generateDistanceTableFromPreviousTable(std::vector<struct dataElem> * dataPoints, struct gpulookuptable * gpuLookupArray, int * directNeighborArray, int * totalDirectNeighbors, double * epsilon, struct table * neighborTable) { printf("\nIn generate from previous table:\nDatapoints: %lu, \nTotal direct neighbors: %d\n",dataPoints->size(), *totalDirectNeighbors); cout<<"\n** Last CUDA error start of fn: "<<cudaGetLastError(); //CUDA error code: cudaError_t errCode; unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)); *N=dataPoints->size(); printf("\n in generate previous table GPU method: N is: %u",*N);cout.flush(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*N)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*N)); if(errCode != cudaSuccess) { cout << "\nError: database (in previous table method) Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<(*N); i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database memcopy (in previous table method) Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////// //Copy the lookup array struct to the GPU: /////////////////////////////// struct gpulookuptable * dev_gpuLookupArray; dev_gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*(*N)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_gpuLookupArray, sizeof(struct gpulookuptable)*(*N)); if(errCode != cudaSuccess) { cout << "\nError: gpu lookup array Got error with code " << errCode << endl; cout.flush(); } //copy lookup array to the device: errCode=cudaMemcpy(dev_gpuLookupArray, gpuLookupArray, sizeof(struct gpulookuptable)*(*N), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy the lookup array struct to the GPU: /////////////////////////////// /////////////////////////////// //Copy direct neighbor array to the GPU: /////////////////////////////// int * dev_directNeighborArray; dev_directNeighborArray=(int*)malloc(sizeof(int)*(*totalDirectNeighbors)); //allocate memory on device for the direct neighbor array: errCode=cudaMalloc( (void**)&dev_directNeighborArray, sizeof(int)*(*totalDirectNeighbors)); if(errCode != cudaSuccess) { cout << "\nError: gpu direct neighbor array Got error with code " << errCode << endl; cout.flush(); } //copy direct neighbor array to the device: 
errCode=cudaMemcpy(dev_directNeighborArray, directNeighborArray, sizeof(int)*(*totalDirectNeighbors), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: direct neighbor array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy direct neighbor array to the GPU: /////////////////////////////// /////////////////////////////// //copy the size of the database /////////////////////////////// unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_N, N, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } /////////////////////////////// //END copy the size of the database /////////////////////////////// /////////////////////////////// //copy the newer (smaller) epsilon /////////////////////////////// double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END copy the newer (smaller) epsilon /////////////////////////////// /////////////////////////////////// //ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } /////////////////////////////////// //END ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// // struct structresults * dev_results; // struct structresults * results; // errCode=cudaMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); // if(errCode != cudaSuccess) { // cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory // } // printf("\nmemory requested for results from previous table (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); // //host result allocation: // results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); int * dev_pointIDKey; //key int * dev_pointInDistValue; //value int * pointIDKey; //key int * pointInDistValue; //value errCode=cudaMalloc((void **)&dev_pointIDKey, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue, sizeof(int)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << 
endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(int)*2*BUFFERELEM)/(1024*1024*1024)); double tstartalloc=omp_get_wtime(); //host result allocation: //pinned result set memory for the host // cudaMallocHost((void **) &pointIDKey, sizeof(int)*BUFFERELEM); // cudaMallocHost((void **) &pointInDistValue, sizeof(int)*BUFFERELEM); //PAGED MEMORY ALLOCATION FOR SMALL RESULT SET WITH SINGLE KERNEL EXECUTION? pointIDKey=(int*)malloc(sizeof(int)*BUFFERELEM); pointInDistValue=(int*)malloc(sizeof(int)*BUFFERELEM); double tendalloc=omp_get_wtime(); //printf("\nTime to allocate pinned memory on the host: %f", tendalloc - tstartalloc); printf("\nTime to allocate (non-pinned) memory on the host: %f", tendalloc - tstartalloc); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////////////////// //EXECUTE KERNEL /////////////////////////////// const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks (from previous table method): %d",TOTALBLOCKS); //execute kernel calcNeighborsFromTableKernel<<< TOTALBLOCKS, BLOCKSIZE >>>(dev_N, dev_gpuLookupArray, dev_directNeighborArray, dev_cnt, dev_epsilon, dev_database, dev_pointIDKey, dev_pointInDistValue); cout <<endl<<"After kernel launch, Error code: "<<cudaGetLastError()<<endl; if ( cudaSuccess != cudaGetLastError() ){ printf( "\nError in kernel launch (previous table method)!" ); // cout <<endl<<"Error code: "<<cudaGetLastError()<<endl; } /////////////////////////////// //END EXECUTE KERNEL /////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results //errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; // } // else // { // printf("\nGPU: result set size on within epsilon: %d",*cnt); // } errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on GPU within epsilon (from precomputed table method): %d",*cnt); } //copy the results, but only transfer the number of results, not the entire buffer // errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting results from GPU (from the precomputed table) Got error with code " << errCode << endl; // } //*totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// ///////////////// //FREE ///////////////// cudaFree(dev_directNeighborArray); cudaFree(dev_gpuLookupArray); //cudaFree(dev_results); cudaFree(dev_database); cudaFree(dev_epsilon); cudaFree(dev_N); cudaFree(dev_cnt); //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT 
TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue); // allocate space for the output //thrust::device_vector<int> sortedKeys(*cnt); //thrust::device_vector<int> sortedVals(*cnt); try{ thrust::sort_by_key(dev_keys_ptr, dev_keys_ptr + (*cnt), dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //copy the sorted arays back to the host thrust::copy(dev_keys_ptr,dev_keys_ptr+(*cnt),pointIDKey); thrust::copy(dev_data_ptr,dev_data_ptr+(*cnt),pointInDistValue); //free the data on the device cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// double tStartTableConstruct=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey, pointInDistValue, neighborTable, cnt); double tEndTableConstruct=omp_get_wtime(); printf("\nTime constructing table: %f",tEndTableConstruct - tStartTableConstruct); //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// /* //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //////////////////////////////////// double tstartsort=omp_get_wtime(); //make a host vector initialized with the results that have been transfered from the GPU thrust::host_vector<structresults> hVectResults(results,results+(*cnt)); // for (int i=0;i<numResults;i++) // { // printf("\n host vector: %d, %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } // for (int i=0; i<numResults; i++) // { // structresults tmp; // tmp.pointID=0; // tmp.pointInDist=0; // hVectResults.push_back(tmp); // } //Now transfer the hostvector to the device: thrust::device_vector<structresults> dVectResults=hVectResults; //sort the device vector on the GPU try{ thrust::sort(dVectResults.begin(), dVectResults.end(),compareThrust()); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } // transfer the sorted results back to host thrust::copy(dVectResults.begin(), dVectResults.end(), hVectResults.begin()); double tendsort=omp_get_wtime(); printf("\nTime to sort on the GPU (from precompute table): %f",tendsort-tstartsort); //print GPU: // for (int i=0; i<(*cnt);i++) // { // printf("\nPrecompute GPU elem: %d, data: %d",hVectResults[i].pointID,hVectResults[i].pointInDist); // } //////////////////////////////////// //END SORT THE DATA ON THE GPU //////////////////////////////////// //////////////////////////////////// //CONSTRUCT TABLE //////////////////////////////////// for (unsigned int i=0; i<(*cnt); i++) { unsigned int index=hVectResults[i].pointID; neighborTable[index].pointID=index; neighborTable[index].neighbors.push_back(hVectResults[i].pointInDist); } */ //print table: /* printf("\n****Precompute table: "); for (int i=0; i<(*N); i++) { printf("\nPoint id: %d In distance: ", neighborTable[i].pointID); for (int j=0; j<neighborTable[i].neighbors.size();j++) { printf("%d, ",neighborTable[i].neighbors[j]); } } */ //////////////////////////////////// //END CONSTRUCT TABLE //////////////////////////////////// } //USE THIS TO MAKE A TABLE FROM A PREVIOUS TABLE WITH A HIGHER EPSILON //TAKES AS INPUT: 
//The data points (Database) gpuLookupArray //A lookup array that points to an array with the neighbors of each data point (directNeighborArray) //The total number of direct neighbors: totalDirectNeighbors //epsilon //previousEpsilon- the epsilon that made the input direct neighbors: used to estimate batch sizes for the new epsilon //The resulting neighborTable to be passed into DBSCAN //The total number of neighbors in the table //It batches the results off of the GPU. //However, if the number of direct neighbors are too large, we don't batch these on and off in addition to the resultset //We return false and generate a new neighborTable using the index and not another neighborTable) bool generateDistanceTableFromPreviousTableBatches(std::vector<struct dataElem> * dataPoints, struct gpulookuptable * gpuLookupArray, int * directNeighborArray, unsigned int * totalDirectNeighbors, double * epsilon, double * previousEpsilon, struct table * neighborTable, unsigned int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); printf("\nIn generate from previous table:\nDatapoints: %lu, \nTotal direct neighbors: %d\n",dataPoints->size(), *totalDirectNeighbors); cout<<"\n** Last CUDA error start of fn: "<<cudaGetLastError(); printf("\n\nNOTE THAT SEG FAULTS ARE TYPICALLY DUE TO INSUFFICIENT BUFFER SPACE FOR THE RESULTS WHEN BATCHING\n\n"); //CUDA error code: cudaError_t errCode; unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in generate previous table GPU method: DNSIZE is: %u",*DBSIZE);cout.flush(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); if(errCode != cudaSuccess) { cout << "\nError: database (in previous table method) Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<*DBSIZE; i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database memcopy (in previous table method) Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////// //Copy the lookup array struct to the GPU: /////////////////////////////// struct gpulookuptable * dev_gpuLookupArray; dev_gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE)); if(errCode != cudaSuccess) { cout << "\nError: gpu lookup array Got error with code " << errCode << endl; cout.flush(); } printf("\nSize of lookup table: %f (GiB)", (double)sizeof(struct gpulookuptable)*(*DBSIZE)/(1024*1024*1024)); //copy lookup array to the device: errCode=cudaMemcpy(dev_gpuLookupArray, gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array memcpy Got error with code " << errCode << endl; } 
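	//Sketch for clarity (comments only, not executed): on the host side, the candidate neighbors of
	//point i under the previous (larger) epsilon are the contiguous range
	//	for (int k=gpuLookupArray[i].indexmin; k<=gpuLookupArray[i].indexmax; k++)
	//	{
	//		int candidateID=directNeighborArray[k];	//candidateID is a hypothetical name
	//	}
	//The kernel launched later in this function only re-checks these candidates against the new (smaller) epsilon.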
/////////////////////////////// //END Copy the lookup array struct to the GPU: /////////////////////////////// /////////////////////////////// //Copy direct neighbor array to the GPU: /////////////////////////////// int * dev_directNeighborArray; dev_directNeighborArray=(int*)malloc(sizeof(int)*(*totalDirectNeighbors)); //allocate memory on device for the direct neighbor array: errCode=cudaMalloc( (void**)&dev_directNeighborArray, sizeof(int)*(*totalDirectNeighbors)); if(errCode != cudaSuccess) { cout << "\nError: gpu direct neighbor array Got error with code " << errCode << endl; cout.flush(); } //copy direct neighbor array to the device: errCode=cudaMemcpy(dev_directNeighborArray, directNeighborArray, sizeof(int)*(*totalDirectNeighbors), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: direct neighbor array memcpy Got error with code " << errCode << endl; } printf("\nSize of direct neighbor array: %f (GiB)", (double)sizeof(int)*(*totalDirectNeighbors)/(1024*1024*1024)); /////////////////////////////// //END Copy direct neighbor array to the GPU: /////////////////////////////// /////////////////////////////// //copy the size of the database /////////////////////////////// //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } /////////////////////////////// //END copy the size of the database /////////////////////////////// /////////////////////////////// //copy the newer (smaller) epsilon /////////////////////////////// double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END copy the newer (smaller) epsilon /////////////////////////////// /////////////////////////////////// 
//ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); // if(errCode != cudaSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } /////////////////////////////////// //END ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING PREVIOUS SIZE OF NEIGHBORTABLE /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST unsigned int GPUBufferSize=100000000; double alpha=1; //overestimation factor is greater for the table because as epsilon increases, the //total number of neighbors within the epsilon neighborhood increases at a lower rate //i.e., as epsilon approaches infinity, the total number of neighbors within epsilon //becomes constant. int numBatches=0; double areaRatioNewOldEpsilon=(M_PI*(*epsilon)*(*epsilon))/(M_PI*(*previousEpsilon)*(*previousEpsilon)); unsigned int estimatedTotalSize=(*totalDirectNeighbors)*areaRatioNewOldEpsilon*(1.0+alpha); printf("\nPrevious table size: %u, area ratio of epsilons: %f, estimated total size (incl. alpha): %u", *totalDirectNeighbors, areaRatioNewOldEpsilon, estimatedTotalSize); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize/GPUSTREAMS; //but we fix the 3 streams still (thats why divide by 3). 
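		//Worked example with assumed (illustrative) numbers: if *totalDirectNeighbors=10000000,
		//*previousEpsilon=2.0, *epsilon=1.0 and alpha=1, then areaRatioNewOldEpsilon=0.25 and
		//estimatedTotalSize=10000000*0.25*(1.0+1.0)=5000000. Assuming GPUSTREAMS=3 (as the comment
		//above suggests), this branch shrinks GPUBufferSize to 5000000/3=1666666, and numBatches
		//below becomes ceil(5000000/1666666)=4 because of the integer truncation.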
} numBatches=ceil(estimatedTotalSize*1.0/GPUBufferSize*1.0); printf("\n\nNumber of batches: %d, buffer size: %d\n\n", numBatches, GPUBufferSize); //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { cudaMallocHost((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); cudaMallocHost((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // cudaMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // cudaMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// cudaStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //cudaStreamCreate(&stream[i]); cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; /* //////OLD NON-BATCHED const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks (from previous table method): %d",TOTALBLOCKS); //execute kernel calcNeighborsFromTableKernel<<< TOTALBLOCKS, BLOCKSIZE >>>(dev_N, dev_gpuLookupArray, dev_directNeighborArray, dev_cnt, dev_epsilon, dev_database, dev_pointIDKey, dev_pointInDistValue); cout <<endl<<"After kernel launch, Error code: "<<cudaGetLastError()<<endl; if ( cudaSuccess != cudaGetLastError() ){ printf( "\nError in kernel launch (previous table method)!" ); // cout <<endl<<"Error code: "<<cudaGetLastError()<<endl; } */ //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) num_threads(GPUSTREAMS) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\ntid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=cudaMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=cudaMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=cudaMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=cudaMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool calcNeighborsFromTableKernelBatches<<< TOTALBLOCKS, BLOCKSIZE, 0, stream[tid] >>>(&dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_gpuLookupArray, dev_directNeighborArray, &dev_cnt[tid], dev_epsilon, dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); cout <<"\n\nKERNEL LAUNCH RETURN: "<<cudaGetLastError()<<endl<<endl; if ( cudaSuccess != cudaGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<cudaSuccess<<endl<<endl; } // find the size of the number of results errCode=cudaMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\n\nGPU: result set size within epsilon (CONSTRUCT FROM PREVIOUS NEIGHBORTABLE BATCHES): %d\n\n",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::cuda::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //thrust with streams into individual buffers for each batch cudaMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); cudaMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table cudaStreamSynchronize(stream[tid]); //construct portion of the table: double tableconstuctstart=omp_get_wtime(); constructNeighborTableKeyValue(pointIDKey[tid], pointInDistValue[tid], neighborTable, &cnt[tid]); double tableconstuctend=omp_get_wtime(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %d", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); /////////////////////////////// //END EXECUTE KERNEL /////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results //errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; // } // else // { // printf("\nGPU: result set size on within epsilon: %d",*cnt); // } /* errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on GPU within epsilon (from precomputed table method): %d",*cnt); } */ //copy 
the results, but only transfer the number of results, not the entire buffer // errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting results from GPU (from the precomputed table) Got error with code " << errCode << endl; // } //*totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// double tFreeStart=omp_get_wtime(); ///////////////// //FREE ///////////////// for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaStreamDestroy(stream[i]); if(errCode != cudaSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } cudaFree(dev_directNeighborArray); cudaFree(dev_gpuLookupArray); //cudaFree(dev_results); cudaFree(dev_database); cudaFree(dev_epsilon); cudaFree(dev_N); cudaFree(dev_cnt); cudaFree(dev_offset); cudaFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free the data on the device cudaFree(dev_pointIDKey[i]); cudaFree(dev_pointInDistValue[i]); //free on the host cudaFreeHost(pointIDKey[i]); cudaFreeHost(pointInDistValue[i]); } cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //free pinned memory on host cudaFreeHost(pointIDKey); cudaFreeHost(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); cout<<"\n** last error at end of fn construct table batches: "<<cudaGetLastError(); return true; } //USE THIS TO MAKE A TABLE FROM A PREVIOUS TABLE WITH A HIGHER EPSILON //TAKES AS INPUT: //The data points (Database) gpuLookupArray //A lookup array that points to an array with the neighbors of each data point (directNeighborArray) //The total number of direct neighbors: totalDirectNeighbors //epsilon //previousEpsilon- the epsilon that made the input direct neighbors: used to estimate batch sizes for the new epsilon //The resulting neighborTable to be passed into DBSCAN //The total number of neighbors in the table //It batches the results off of the GPU. 
//However, if the number of direct neighbors are too large, we don't batch these on and off in addition to the resultset //We return false and generate a new neighborTable using the index and not another neighborTable) bool generateDistanceTableFromPreviousTableBatchesAlternate(std::vector<struct dataElem> * dataPoints, struct gpulookuptable * gpuLookupArray, int * directNeighborArray, unsigned int * totalDirectNeighbors, double * epsilon, double * previousEpsilon, struct neighborTableLookup * neighborTable, std::vector<struct neighborDataPtrs> * pointersToNeighbors,unsigned int * totalNeighbors) { double tKernelResultsStart=omp_get_wtime(); printf("\nIn generate from previous table:\nDatapoints: %lu, \nTotal direct neighbors: %d\n",dataPoints->size(), *totalDirectNeighbors); cout<<"\n** Last CUDA error start of fn: "<<cudaGetLastError(); printf("\n\nNOTE THAT SEG FAULTS ARE TYPICALLY DUE TO INSUFFICIENT BUFFER SPACE FOR THE RESULTS WHEN BATCHING\n\n"); //CUDA error code: cudaError_t errCode; unsigned int * DBSIZE; DBSIZE=(unsigned int*)malloc(sizeof(unsigned int)); *DBSIZE=dataPoints->size(); printf("\n in generate previous table GPU method: DNSIZE is: %u",*DBSIZE);cout.flush(); /////////////////////////////////// //COPY THE DATABASE TO THE GPU /////////////////////////////////// struct point * database; database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); struct point * dev_database; dev_database=(struct point*)malloc(sizeof(struct point)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*(*DBSIZE)); if(errCode != cudaSuccess) { cout << "\nError: database (in previous table method) Got error with code " << errCode << endl; cout.flush(); } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<*DBSIZE; i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database memcopy (in previous table method) Got error with code " << errCode << endl; } /////////////////////////////////// //END COPY THE DATABASE TO THE GPU /////////////////////////////////// /////////////////////////////// //Copy the lookup array struct to the GPU: /////////////////////////////// struct gpulookuptable * dev_gpuLookupArray; dev_gpuLookupArray=(struct gpulookuptable*)malloc(sizeof(struct gpulookuptable)*(*DBSIZE)); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE)); if(errCode != cudaSuccess) { cout << "\nError: gpu lookup array Got error with code " << errCode << endl; cout.flush(); } printf("\nSize of lookup table: %f (GiB)", (double)sizeof(struct gpulookuptable)*(*DBSIZE)/(1024*1024*1024)); //copy lookup array to the device: errCode=cudaMemcpy(dev_gpuLookupArray, gpuLookupArray, sizeof(struct gpulookuptable)*(*DBSIZE), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: lookup array memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END Copy the lookup array struct to the GPU: /////////////////////////////// /////////////////////////////// //Copy direct neighbor array to the GPU: /////////////////////////////// int * dev_directNeighborArray; dev_directNeighborArray=(int*)malloc(sizeof(int)*(*totalDirectNeighbors)); //allocate memory on device for the direct neighbor array: errCode=cudaMalloc( 
(void**)&dev_directNeighborArray, sizeof(int)*(*totalDirectNeighbors)); if(errCode != cudaSuccess) { cout << "\nError: gpu direct neighbor array Got error with code " << errCode << endl; cout.flush(); } //copy direct neighbor array to the device: errCode=cudaMemcpy(dev_directNeighborArray, directNeighborArray, sizeof(int)*(*totalDirectNeighbors), cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: direct neighbor array memcpy Got error with code " << errCode << endl; } printf("\nSize of direct neighbor array: %f (GiB)", (double)sizeof(int)*(*totalDirectNeighbors)/(1024*1024*1024)); /////////////////////////////// //END Copy direct neighbor array to the GPU: /////////////////////////////// /////////////////////////////// //copy the size of the database /////////////////////////////// //number of threads per gpu stream //THE NUMBER OF THREADS THAT ARE LAUNCHED IN A SINGLE KERNEL INVOCATION //CAN BE FEWER THAN THE NUMBER OF ELEMENTS IN THE DATABASE IF MORE THAN 1 BATCH unsigned int * N; N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); unsigned int * dev_N; dev_N=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: dev_N Got error with code " << errCode << endl; } //offset into the database when batching the results unsigned int * batchOffset; batchOffset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_offset; dev_offset=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_offset, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: offset Got error with code " << errCode << endl; } //Batch number to calculate the point to process (in conjunction with the offset) //offset into the database when batching the results unsigned int * batchNumber; batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //*batchOffset=0; unsigned int * dev_batchNumber; dev_batchNumber=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); //allocate on the device errCode=cudaMalloc((void**)&dev_batchNumber, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: batchNumber Got error with code " << errCode << endl; } /////////////////////////////// //END copy the size of the database /////////////////////////////// /////////////////////////////// //copy the newer (smaller) epsilon /////////////////////////////// double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); //Allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; } errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon memcpy Got error with code " << errCode << endl; } /////////////////////////////// //END copy the newer (smaller) epsilon /////////////////////////////// /////////////////////////////////// //ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// //count values - for an individual kernel launch //need different count values for each stream unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *cnt=0; unsigned int * dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)*GPUSTREAMS); *dev_cnt=0; //allocate on the device 
errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)*GPUSTREAMS); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; } // errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); // if(errCode != cudaSuccess) { // cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; // } /////////////////////////////////// //END ALLOCATE COUNT ON THE DEVICE (THE NUMBER OF RESULT ITEMS) /////////////////////////////////// /////////////////////////////////// //ALLOCATE MEMORY FOR THE RESULT SET USING PREVIOUS SIZE OF NEIGHBORTABLE /////////////////////////////////// //NEED BUFFERS ON THE GPU AND THE HOST FOR THE NUMBER OF CONCURRENT STREAMS //GPU BUFFER ON THE DEVICE //BUFFER ON THE HOST WITH PINNED MEMORY FOR FAST MEMCPY //BUFFER ON THE HOST TO DUMP THE RESULTS OF BATCHES SO THAT GPU THREADS CAN CONTINUE //EXECUTING STREAMS ON THE HOST unsigned int GPUBufferSize=100000000; double alpha=0.6; //overestimation factor is greater for the table because as epsilon increases, the //total number of neighbors within the epsilon neighborhood increases at a lower rate //i.e., as epsilon approaches infinity, the total number of neighbors within epsilon //becomes constant. int numBatches=0; double areaRatioNewOldEpsilon=(M_PI*(*epsilon)*(*epsilon))/(M_PI*(*previousEpsilon)*(*previousEpsilon)); unsigned int estimatedTotalSize=(*totalDirectNeighbors)*areaRatioNewOldEpsilon*(1.0+alpha); printf("\nPrevious table size: %u, area ratio of epsilons: %f, estimated total size (incl. alpha): %u", *totalDirectNeighbors, areaRatioNewOldEpsilon, estimatedTotalSize); //to accomodate small datasets, we need smaller buffers because the pinned memory malloc is expensive if (estimatedTotalSize<(GPUBufferSize*GPUSTREAMS)) { GPUBufferSize=estimatedTotalSize/GPUSTREAMS; //but we fix the 3 streams still (thats why divide by 3). 
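		//Note (sketch only, not implemented here): because the per-stream buffers are sized from this
		//estimate, a batch whose result count exceeds GPUBufferSize overflows them (the seg-fault note
		//printed at the start of this function). A hypothetical guard after cnt[tid] is copied back
		//per batch could be:
		//	if (cnt[tid]>GPUBufferSize) { printf("\nBatch result count exceeds GPUBufferSize"); exit(-1); }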
} numBatches=ceil(estimatedTotalSize*1.0/GPUBufferSize*1.0); printf("\n\nNumber of batches: %d, buffer size: %d\n\n", numBatches, GPUBufferSize); //GPU MEMORY ALLOCATION: //CHANGING THE RESULTS TO KEY VALUE PAIR SORT, WHICH IS TWO ARRAYS //KEY IS THE POINT ID //THE VALUE IS THE POINT ID WITHIN THE DISTANCE OF KEY int * dev_pointIDKey[GPUSTREAMS]; //key int * dev_pointInDistValue[GPUSTREAMS]; //value for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaMalloc((void **)&dev_pointIDKey[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc((void **)&dev_pointInDistValue[i], sizeof(int)*GPUBufferSize); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } } //HOST RESULT ALLOCATION FOR THE GPU TO COPY THE DATA INTO A PINNED MEMORY ALLOCATION //ON THE HOST //pinned result set memory for the host //the number of elements are recorded for that batch in resultElemCountPerBatch //NEED PINNED MEMORY ALSO BECAUSE YOU NEED IT TO USE STREAMS IN THRUST FOR THE MEMCOPY OF THE SORTED RESULTS //PINNED MEMORY TO COPY FROM THE GPU int * pointIDKey[GPUSTREAMS]; //key int * pointInDistValue[GPUSTREAMS]; //value double tstartpinnedresults=omp_get_wtime(); for (int i=0; i<GPUSTREAMS; i++) { cudaMallocHost((void **) &pointIDKey[i], sizeof(int)*GPUBufferSize); cudaMallocHost((void **) &pointInDistValue[i], sizeof(int)*GPUBufferSize); } double tendpinnedresults=omp_get_wtime(); printf("\nTime to allocate pinned memory for results: %f", tendpinnedresults - tstartpinnedresults); // cudaMalloc((void **) &pointIDKey, sizeof(int)*GPUBufferSize*NUMBATCHES); // cudaMalloc((void **) &pointInDistValue, sizeof(int)*GPUBufferSize*NUMBATCHES); printf("\nmemory requested for results ON GPU (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); printf("\nmemory requested for results in MAIN MEMORY (GiB): %f",(double)(sizeof(int)*2*GPUBufferSize*GPUSTREAMS)/(1024*1024*1024)); /////////////////////////////////// //END ALLOCATE MEMORY FOR THE RESULT SET /////////////////////////////////// /////////////////// //ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// //THE NUMBER OF POINTERS IS EQUAL TO THE NUMBER OF BATCHES for (int i=0; i<numBatches; i++) { int *ptr; struct neighborDataPtrs tmpStruct; tmpStruct.dataPtr=ptr; tmpStruct.sizeOfDataArr=0; pointersToNeighbors->push_back(tmpStruct); } /////////////////// //END ALLOCATE POINTERS TO INTEGER ARRAYS FOR THE VALUES FOR THE NEIGHBORTABLES /////////////////// ///////////////////////////////// //SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// omp_set_nested(1); omp_set_num_threads(GPUSTREAMS); ///////////////////////////////// //END SET OPENMP ENVIRONMENT VARIABLES //////////////////////////////// ///////////////////////////////// //CREATE STREAMS //////////////////////////////// cudaStream_t stream[GPUSTREAMS]; for (int i=0; i<GPUSTREAMS; i++){ //cudaStreamCreate(&stream[i]); cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking); } ///////////////////////////////// //END CREATE STREAMS //////////////////////////////// /////////////////////////////////// //LAUNCH KERNEL IN BATCHES /////////////////////////////////// //since we use the strided scheme, some of the batch sizes //are off by 1 of each other, a first group of batches will //have 1 extra data point to process, and we calculate which batch numbers will //have that. 
The batchSize is the lower value (+1 is added to the first ones) unsigned int batchSize=(*DBSIZE)/numBatches; unsigned int batchesThatHaveOneMore=(*DBSIZE)-(batchSize*numBatches); //batch number 0- < this value have one more printf("\n\n***Batches that have one more: %u batchSize(N): %u, \n\n***",batchSize, batchesThatHaveOneMore); unsigned int totalResultsLoop=0; /* //////OLD NON-BATCHED const int TOTALBLOCKS=ceil((1.0*(*N))/(1.0*BLOCKSIZE)); printf("\ntotal blocks (from previous table method): %d",TOTALBLOCKS); //execute kernel calcNeighborsFromTableKernel<<< TOTALBLOCKS, BLOCKSIZE >>>(dev_N, dev_gpuLookupArray, dev_directNeighborArray, dev_cnt, dev_epsilon, dev_database, dev_pointIDKey, dev_pointInDistValue); cout <<endl<<"After kernel launch, Error code: "<<cudaGetLastError()<<endl; if ( cudaSuccess != cudaGetLastError() ){ printf( "\nError in kernel launch (previous table method)!" ); // cout <<endl<<"Error code: "<<cudaGetLastError()<<endl; } */ //FOR LOOP OVER THE NUMBER OF BATCHES STARTS HERE #pragma omp parallel for schedule(static,1) reduction(+:totalResultsLoop) for (int i=0; i<numBatches; i++) { int tid=omp_get_thread_num(); printf("\nMaking table from previous, tid: %d, starting iteration: %d",tid,i); //N NOW BECOMES THE NUMBER OF POINTS TO PROCESS PER BATCH //AS ONE THREAD PROCESSES A SINGLE POINT if (i<batchesThatHaveOneMore) { N[tid]=batchSize+1; printf("\nN: %d, tid: %d",N[tid], tid); } else { N[tid]=batchSize; printf("\nN (1 less): %d tid: %d",N[tid], tid); } //printf("\nN is: %d, tid: %d", N[tid], tid); //set relevant parameters for the batched execution that get reset //copy N to device //N IS THE NUMBER OF THREADS errCode=cudaMemcpyAsync( &dev_N[tid], &N[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; } //the batched result set size (reset to 0): cnt[tid]=0; errCode=cudaMemcpyAsync( &dev_cnt[tid], &cnt[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_cnt memcpy Got error with code " << errCode << endl; } //the offset for batching, which keeps track of where to start processing at each batch //batchOffset[tid]=i*batchSize; //original batchOffset[tid]=numBatches; //for the strided errCode=cudaMemcpyAsync( &dev_offset[tid], &batchOffset[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_offset memcpy Got error with code " << errCode << endl; } //the batch number for batching with strided batchNumber[tid]=i; errCode=cudaMemcpyAsync( &dev_batchNumber[tid], &batchNumber[tid], sizeof(unsigned int), cudaMemcpyHostToDevice, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: dev_batchNumber memcpy Got error with code " << errCode << endl; } const int TOTALBLOCKS=ceil((1.0*(N[tid]))/(1.0*BLOCKSIZE)); printf("\ntotal blocks: %d",TOTALBLOCKS); //execute kernel //0 is shared memory pool calcNeighborsFromTableKernelBatches<<< TOTALBLOCKS, BLOCKSIZE, 0, stream[tid] >>>(&dev_N[tid], &dev_offset[tid], &dev_batchNumber[tid], dev_gpuLookupArray, dev_directNeighborArray, &dev_cnt[tid], dev_epsilon, dev_database, dev_pointIDKey[tid], dev_pointInDistValue[tid]); cout <<"\n\nKERNEL LAUNCH RETURN: "<<cudaGetLastError()<<endl<<endl; if ( cudaSuccess != cudaGetLastError() ){ cout <<"\n\nERROR IN KERNEL LAUNCH. 
ERROR: "<<cudaSuccess<<endl<<endl; } // find the size of the number of results errCode=cudaMemcpyAsync( &cnt[tid], &dev_cnt[tid], sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[tid] ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\n\nGPU: result set size within epsilon (CONSTRUCT FROM PREVIOUS NEIGHBORTABLE BATCHES): %d\n\n",cnt[tid]); } //////////////////////////////////// //SORT THE TABLE DATA ON THE GPU //THERE IS NO ORDERING BETWEEN EACH POINT AND THE ONES THAT IT'S WITHIN THE DISTANCE OF //////////////////////////////////// ///////////////////////////// //ONE PROBLEM WITH NOT TRANSFERING THE RESULT OFF OF THE DEVICE IS THAT //YOU CAN'T RESIZE THE RESULTS TO BE THE SIZE OF *CNT //SO THEN YOU HAVE POTENTIALLY LOTS OF WASTED SPACE ///////////////////////////// //sort by key with the data already on the device: //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_keys_ptr(dev_pointIDKey[tid]); thrust::device_ptr<int> dev_data_ptr(dev_pointInDistValue[tid]); //XXXXXXXXXXXXXXXX //THRUST USING STREAMS REQUIRES THRUST V1.8 //SEEMS TO BE WORKING :) //XXXXXXXXXXXXXXXX try{ thrust::sort_by_key(thrust::cuda::par.on(stream[tid]), dev_keys_ptr, dev_keys_ptr + cnt[tid], dev_data_ptr); } catch(std::bad_alloc &e) { std::cerr << "Ran out of memory while sorting" << std::endl; exit(-1); } //thrust with streams into individual buffers for each batch cudaMemcpyAsync(thrust::raw_pointer_cast(pointIDKey[tid]), thrust::raw_pointer_cast(dev_keys_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); cudaMemcpyAsync(thrust::raw_pointer_cast(pointInDistValue[tid]), thrust::raw_pointer_cast(dev_data_ptr), cnt[tid]*sizeof(int), cudaMemcpyDeviceToHost, stream[tid]); //need to make sure the data is copied before constructing portion of the neighbor table cudaStreamSynchronize(stream[tid]); //construct portion of the table: double tableconstuctstart=omp_get_wtime(); //constructNeighborTableKeyValue(pointIDKey[tid], pointInDistValue[tid], neighborTable, &cnt[tid]); //set the number of neighbors in the pointer struct: (*pointersToNeighbors)[i].sizeOfDataArr=cnt[tid]; (*pointersToNeighbors)[i].dataPtr=new int[cnt[tid]]; constructNeighborTableKeyValueAlternateTest(pointIDKey[tid], pointInDistValue[tid], neighborTable, (*pointersToNeighbors)[i].dataPtr, &cnt[tid]); double tableconstuctend=omp_get_wtime(); //cout <<"\nIn neighbortable from previous table. 
Data array ptr: "<<(*pointersToNeighbors)[i].dataPtr<<" , size of data array: "<<(*pointersToNeighbors)[i].sizeOfDataArr;cout.flush(); printf("\nTable construct time: %f", tableconstuctend - tableconstuctstart); //add the batched result set size to the total count totalResultsLoop+=cnt[tid]; printf("\nRunning total of total size of result array, tid: %d: %u", tid, totalResultsLoop); //} } //END LOOP OVER THE GPU BATCHES printf("\nTOTAL RESULT SET SIZE ON HOST: %d", totalResultsLoop); *totalNeighbors=totalResultsLoop; double tKernelResultsEnd=omp_get_wtime(); printf("\nTime to launch kernel and execute all of the previous part of the method and get the results back: %f",tKernelResultsEnd-tKernelResultsStart); /////////////////////////////// //END EXECUTE KERNEL /////////////////////////////// /////////////////////////////////// //GET RESULT SET /////////////////////////////////// //first find the size of the number of results //errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; // } // else // { // printf("\nGPU: result set size on within epsilon: %d",*cnt); // } /* errCode=cudaMemcpy( cnt, dev_cnt, sizeof(unsigned int), cudaMemcpyDeviceToHost ); if(errCode != cudaSuccess) { cout << "\nError: getting cnt from GPU Got error with code " << errCode << endl; } else { printf("\nGPU: result set size on GPU within epsilon (from precomputed table method): %d",*cnt); } */ //copy the results, but only transfer the number of results, not the entire buffer // errCode=cudaMemcpy(results, dev_results, sizeof(struct structresults)*(*cnt), cudaMemcpyDeviceToHost ); // if(errCode != cudaSuccess) { // cout << "\nError: getting results from GPU (from the precomputed table) Got error with code " << errCode << endl; // } //*totalNeighbors=(*cnt); //SORTING FOR TESTING ONLY //XXXXXXXXX //XXXXXXXXX // std::sort(results, results+(*cnt),compResults); // printf("\n**** GPU\n"); // for (int i=0; i<(*cnt); i++) // { // printf("\n%d,%d",results[i].pointID, results[i].pointInDist); // } //XXXXXXXXX //XXXXXXXXX //XXXXXXXXX /////////////////////////////////// //END GET RESULT SET /////////////////////////////////// double tFreeStart=omp_get_wtime(); ///////////////// //FREE ///////////////// for (int i=0; i<GPUSTREAMS; i++) { errCode=cudaStreamDestroy(stream[i]); if(errCode != cudaSuccess) { cout << "\nError: destroying stream" << errCode << endl; } } cudaFree(dev_directNeighborArray); cudaFree(dev_gpuLookupArray); //cudaFree(dev_results); cudaFree(dev_database); cudaFree(dev_epsilon); cudaFree(dev_N); cudaFree(dev_cnt); cudaFree(dev_offset); cudaFree(dev_batchNumber); //free data related to the individual streams for each batch for (int i=0; i<GPUSTREAMS; i++) { //free the data on the device cudaFree(dev_pointIDKey[i]); cudaFree(dev_pointInDistValue[i]); //free on the host cudaFreeHost(pointIDKey[i]); cudaFreeHost(pointInDistValue[i]); } cudaFree(dev_pointIDKey); cudaFree(dev_pointInDistValue); //free pinned memory on host cudaFreeHost(pointIDKey); cudaFreeHost(pointInDistValue); double tFreeEnd=omp_get_wtime(); printf("\nTime freeing memory: %f", tFreeEnd - tFreeStart); cout<<"\n** last error at end of fn construct table batches: "<<cudaGetLastError(); return true; } /* //METHOD TO COPY THE DATABASE TO THE GPU: //takes as input: //the imported points, but which include extraneous information (tec, time) //a pointer to the database on the GPU void 
copyDatabaseToGPU(std::vector<struct dataElem> * dataPoints, struct point * dev_database) { //CUDA error code: cudaError_t errCode; // unsigned int N=dataPoints->size(); struct point * database; database=(struct point*)malloc(sizeof(struct point)*N); dev_database=(struct point*)malloc(sizeof(struct point)*N); //allocate memory on device: errCode=cudaMalloc( (void**)&dev_database, sizeof(struct point)*N ); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; //2 means not enough memory } //first, we copy the x and y values from dataPoints to the database for (int i=0; i<N; i++) { database[i].x=(*dataPoints)[i].x; database[i].y=(*dataPoints)[i].y; } //printf("\n size of database: %d",N); //copy database to the device: errCode=cudaMemcpy(dev_database, database, sizeof(struct point)*N, cudaMemcpyHostToDevice); if(errCode != cudaSuccess) { cout << "\nError: database Got error with code " << errCode << endl; //2 means not enough memory } } //METHOD TO SET THE KERNEL PARAMETERS void setKernelParams(unsigned int * dev_N, unsigned int * N, unsigned int * dev_debug1, unsigned int * dev_debug2, unsigned int *dev_cnt, double * dev_epsilon, double * epsilon) { //CUDA error code: cudaError_t errCode; //count values unsigned int * cnt; cnt=(unsigned int*)malloc(sizeof(unsigned int)); *cnt=0; //unsigned int *dev_cnt; dev_cnt=(unsigned int*)malloc(sizeof(unsigned int)); *dev_cnt=0; //printf("\ndev cnt in fn: %u",*dev_cnt);cout.flush(); //allocate on the device errCode=cudaMalloc((unsigned int**)&dev_cnt, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: cnt Got error with code " << errCode << endl; //2 means not enough memory } //double * dev_epsilon; dev_epsilon=(double*)malloc(sizeof( double )); *dev_epsilon=*epsilon; //allocate on the device errCode=cudaMalloc((void**)&dev_epsilon, sizeof(double)); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; //2 means not enough memory } //size of the database: dev_N=(unsigned int*)malloc(sizeof( unsigned int )); //*dev_N=N; //allocate on the device errCode=cudaMalloc((void**)&dev_N, sizeof(unsigned int)); if(errCode != cudaSuccess) { cout << "\nError: N Got error with code " << errCode << endl; //2 means not enough memory } //debug values //unsigned int * dev_debug1; //unsigned int * dev_debug2; dev_debug1=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug1=0; dev_debug2=(unsigned int *)malloc(sizeof(unsigned int )); *dev_debug2=0; //allocate on the device errCode=cudaMalloc( (unsigned int **)&dev_debug1, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug1 Got error with code " << errCode << endl; //2 means not enough memory } errCode=cudaMalloc( (unsigned int **)&dev_debug2, sizeof(unsigned int ) ); if(errCode != cudaSuccess) { cout << "\nError: debug2 Got error with code " << errCode << endl; //2 means not enough memory } //copy N, epsilon and cnt to the device //epsilon errCode=cudaMemcpy( dev_epsilon, epsilon, sizeof(double), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << "\nError: epsilon Got error with code " << errCode << endl; //2 means not enough memory } //cnt // errCode=cudaMemcpy( dev_cnt, cnt, sizeof(unsigned int), cudaMemcpyHostToDevice ); // if(errCode != cudaSuccess) { // cout << "\nError: cnt Got error with code " << errCode << endl; //2 means not enough memory // } //N errCode=cudaMemcpy( dev_N, N, sizeof(unsigned int), cudaMemcpyHostToDevice ); if(errCode != cudaSuccess) { cout << 
"\nError: N Got error with code " << errCode << endl; //2 means not enough memory } //printf("\nnumber of elements: %u,%u",*dev_N,N); } void allocateResultSet(struct structresults * dev_results, struct structresults * results) { //dev_results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); cudaError_t errCode=cudaMalloc((void **)&dev_results, sizeof(struct structresults)*BUFFERELEM); if(errCode != cudaSuccess) { cout << "CUDA: Got error with code " << errCode << endl; //2 means not enough memory } printf("\nmemory requested for results (GiB): %f",(double)(sizeof(struct structresults)*BUFFERELEM)/(1024*1024*1024)); //host result allocation: results=(struct structresults*)malloc(sizeof(struct structresults)*BUFFERELEM); } */ bool compResults(structresults const& lhs, structresults const& rhs) { if (lhs.pointID != rhs.pointID) return (lhs.pointID < rhs.pointID); if (lhs.pointInDist != rhs.pointInDist) { return (lhs.pointInDist < rhs.pointInDist); } return (lhs.pointInDist > rhs.pointInDist); }
03e450eca59d01a833da012bdfe0bf490ebd8e31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2014, September 8 - October 10 // ### // ### // ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff // ### // ### // ### // ### // ### Dennis Mack, [email protected], p060 // ### Adrian Haarbach, [email protected], p077 // ### Markus Schlaffer, [email protected], p070 #include <helper.h> #include <iostream> #include <math.h> //#include <stdio.h> using namespace std; // uncomment to use the camera //#define CAMERA void gammaCPU(float *imgIn, float *imgOut, size_t n, float gamma){ for(size_t i=0;i<n;i++){ imgOut[i]=powf(imgIn[i],gamma); } } __global__ void gammaGPU(float *imgIn, float *imgOut, size_t pw, size_t w, size_t h, size_t nc, float gamma){ int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x>=w || y>=h) return; for(int c=0; c<nc; c++){ imgOut[x+y*pw+c*h*pw]=powf(imgIn[x+y*pw+c*h*pw],gamma); } } float GetAverage(float dArray[], int iSize) { float dSum = dArray[0]; for (int i = 1; i < iSize; ++i) { dSum += dArray[i]; } return dSum/iSize; } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate hipDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed float gamma=0.5f; getParam("gamma", gamma, argc, argv); cout << "gamma: " << gamma << endl; // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array size_t n = (size_t)w*h*nc; float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); float *tc, *tg, *tg2; tc=(float*)malloc(repeats * sizeof(float)); tg=(float*)malloc(repeats * sizeof(float)); tg2=(float*)malloc(repeats * sizeof(float)); for(int i=0;i<repeats;i++){ //CPU: Timer timercpu, timergpu, timergpu2; timercpu.start(); gammaCPU(imgIn,imgOut,n,gamma); timercpu.end(); tc[i] = timercpu.get(); // elapsed time in seconds //GPU: timergpu.start(); float *d_imgIn, *d_imgOut; size_t pitchImgIn, pitchImgOut; hipMallocPitch(&d_imgIn, &pitchImgIn, w * sizeof(float), h*nc); CUDA_CHECK; hipMemcpy2D(d_imgIn, pitchImgIn, imgIn, w * sizeof(float), w * sizeof(float), h*nc, hipMemcpyHostToDevice); CUDA_CHECK; hipMallocPitch(&d_imgOut, &pitchImgOut, w * sizeof(float), h*nc); CUDA_CHECK; size_t pw=pitchImgIn / sizeof(float); timergpu2.start(); dim3 block = dim3(32,4,1); dim3 grid = dim3((w + block.x - 1 ) / block.x, (h + block.y - 1 ) / block.y, 1); hipLaunchKernelGGL(( gammaGPU) , dim3(grid),dim3(block), 0, 0, d_imgIn, d_imgOut, pw, w, h, nc, gamma); CUDA_CHECK; hipDeviceSynchronize(); timergpu2.end(); tg2[i] = timergpu2.get(); CUDA_CHECK; hipMemcpy2D(imgOut, w * sizeof(float), d_imgOut, pitchImgOut, w * sizeof(float), h*nc, hipMemcpyDeviceToHost); CUDA_CHECK; hipFree(d_imgOut); CUDA_CHECK; hipFree(d_imgIn); CUDA_CHECK; timergpu.end(); tg[i] = timergpu.get(); // elapsed time in seconds } cout << "avg time cpu: " << GetAverage(tc, repeats)*1000 << " ms" << endl; cout << "avg time gpu: " << GetAverage(tg, repeats)*1000 << " ms" << endl; cout << "avg time gpu allocfree: " << GetAverage(tg2, repeats)*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
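The gamma-correction program above allocates the image with pitched 2D memory (hipMallocPitch / hipMemcpy2D in this HIP version, cudaMallocPitch / cudaMemcpy2D in the .cu twin below) and passes the kernel a pitch converted from bytes to elements, pw = pitchImgIn / sizeof(float). The stand-alone CUDA sketch below isolates just that pattern; the kernel, image size, and scale factor are made up for illustration.

// Sketch: pitched 2D allocation and element-pitch indexing (illustrative only).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scaleKernel(float *img, size_t pw, int w, int h, float s) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    img[x + y * pw] *= s;                      // pw is the pitch in elements, not bytes
}

int main() {
    const int w = 100, h = 60;
    float *h_img = new float[w * h];
    for (int i = 0; i < w * h; i++) h_img[i] = 1.0f;

    float *d_img = nullptr;
    size_t pitch = 0;                          // returned in bytes, may exceed w*sizeof(float)
    cudaMallocPitch((void **)&d_img, &pitch, w * sizeof(float), h);
    cudaMemcpy2D(d_img, pitch, h_img, w * sizeof(float),
                 w * sizeof(float), h, cudaMemcpyHostToDevice);

    dim3 block(32, 4);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
    scaleKernel<<<grid, block>>>(d_img, pitch / sizeof(float), w, h, 2.0f);

    cudaMemcpy2D(h_img, w * sizeof(float), d_img, pitch,
                 w * sizeof(float), h, cudaMemcpyDeviceToHost);
    printf("img[0] = %f\n", h_img[0]);         // expect 2.0

    cudaFree(d_img);
    delete[] h_img;
    return 0;
}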
03e450eca59d01a833da012bdfe0bf490ebd8e31.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2014, September 8 - October 10 // ### // ### // ### Maria Klodt, Jan Stuehmer, Mohamed Souiai, Thomas Moellenhoff // ### // ### // ### // ### // ### Dennis Mack, [email protected], p060 // ### Adrian Haarbach, [email protected], p077 // ### Markus Schlaffer, [email protected], p070 #include <helper.h> #include <iostream> #include <math.h> //#include <stdio.h> using namespace std; // uncomment to use the camera //#define CAMERA void gammaCPU(float *imgIn, float *imgOut, size_t n, float gamma){ for(size_t i=0;i<n;i++){ imgOut[i]=powf(imgIn[i],gamma); } } __global__ void gammaGPU(float *imgIn, float *imgOut, size_t pw, size_t w, size_t h, size_t nc, float gamma){ int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x>=w || y>=h) return; for(int c=0; c<nc; c++){ imgOut[x+y*pw+c*h*pw]=powf(imgIn[x+y*pw+c*h*pw],gamma); } } float GetAverage(float dArray[], int iSize) { float dSum = dArray[0]; for (int i = 1; i < iSize; ++i) { dSum += dArray[i]; } return dSum/iSize; } int main(int argc, char **argv) { // Before the GPU can process your kernels, a so called "CUDA context" must be initialized // This happens on the very first call to a CUDA function, and takes some time (around half a second) // We will do it right here, so that the run time measurements are accurate cudaDeviceSynchronize(); CUDA_CHECK; // Reading command line parameters: // getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var" // If "-param" is not specified, the value of "var" remains unchanged // // return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise #ifdef CAMERA #else // input image string image = ""; bool ret = getParam("i", image, argc, argv); if (!ret) cerr << "ERROR: no image specified" << endl; if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; } #endif // number of computation repetitions to get a better run time measurement int repeats = 1; getParam("repeats", repeats, argc, argv); cout << "repeats: " << repeats << endl; // load the input image as grayscale if "-gray" is specifed bool gray = false; getParam("gray", gray, argc, argv); cout << "gray: " << gray << endl; // ### Define your own parameters here as needed float gamma=0.5f; getParam("gamma", gamma, argc, argv); cout << "gamma: " << gamma << endl; // Init camera / Load input image #ifdef CAMERA // Init camera cv::VideoCapture camera(0); if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; } int camW = 640; int camH = 480; camera.set(CV_CAP_PROP_FRAME_WIDTH,camW); camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH); // read in first frame to get the dimensions cv::Mat mIn; camera >> mIn; #else // Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale)) cv::Mat mIn = cv::imread(image.c_str(), (gray? 
CV_LOAD_IMAGE_GRAYSCALE : -1)); // check if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; } #endif // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; // get image dimensions int w = mIn.cols; // width int h = mIn.rows; // height int nc = mIn.channels(); // number of channels cout << "image: " << w << " x " << h << endl; // Set the output image format // ### // ### // ### TODO: Change the output image format as needed // ### // ### cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers //cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers //cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer // ### Define your own output images here as needed // Allocate arrays // input/output image width: w // input/output image height: h // input image number of channels: nc // output image number of channels: mOut.channels(), as defined above (nc, 3, or 1) // allocate raw input image array size_t n = (size_t)w*h*nc; float *imgIn = new float[(size_t)w*h*nc]; // allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying) float *imgOut = new float[(size_t)w*h*mOut.channels()]; // For camera mode: Make a loop to read in camera frames #ifdef CAMERA // Read a camera image frame every 30 milliseconds: // cv::waitKey(30) waits 30 milliseconds for a keyboard input, // returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed while (cv::waitKey(30) < 0) { // Get camera image camera >> mIn; // convert to float representation (opencv loads image values as single bytes by default) mIn.convertTo(mIn,CV_32F); // convert range of each channel to [0,1] (opencv default is [0,255]) mIn /= 255.f; #endif // Init raw input image array // opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...) // But for CUDA it's better to work with layered images: rrr... ggg... bbb... 
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations convert_mat_to_layered (imgIn, mIn); float *tc, *tg, *tg2; tc=(float*)malloc(repeats * sizeof(float)); tg=(float*)malloc(repeats * sizeof(float)); tg2=(float*)malloc(repeats * sizeof(float)); for(int i=0;i<repeats;i++){ //CPU: Timer timercpu, timergpu, timergpu2; timercpu.start(); gammaCPU(imgIn,imgOut,n,gamma); timercpu.end(); tc[i] = timercpu.get(); // elapsed time in seconds //GPU: timergpu.start(); float *d_imgIn, *d_imgOut; size_t pitchImgIn, pitchImgOut; cudaMallocPitch(&d_imgIn, &pitchImgIn, w * sizeof(float), h*nc); CUDA_CHECK; cudaMemcpy2D(d_imgIn, pitchImgIn, imgIn, w * sizeof(float), w * sizeof(float), h*nc, cudaMemcpyHostToDevice); CUDA_CHECK; cudaMallocPitch(&d_imgOut, &pitchImgOut, w * sizeof(float), h*nc); CUDA_CHECK; size_t pw=pitchImgIn / sizeof(float); timergpu2.start(); dim3 block = dim3(32,4,1); dim3 grid = dim3((w + block.x - 1 ) / block.x, (h + block.y - 1 ) / block.y, 1); gammaGPU <<<grid,block>>> (d_imgIn, d_imgOut, pw, w, h, nc, gamma); CUDA_CHECK; cudaDeviceSynchronize(); timergpu2.end(); tg2[i] = timergpu2.get(); CUDA_CHECK; cudaMemcpy2D(imgOut, w * sizeof(float), d_imgOut, pitchImgOut, w * sizeof(float), h*nc, cudaMemcpyDeviceToHost); CUDA_CHECK; cudaFree(d_imgOut); CUDA_CHECK; cudaFree(d_imgIn); CUDA_CHECK; timergpu.end(); tg[i] = timergpu.get(); // elapsed time in seconds } cout << "avg time cpu: " << GetAverage(tc, repeats)*1000 << " ms" << endl; cout << "avg time gpu: " << GetAverage(tg, repeats)*1000 << " ms" << endl; cout << "avg time gpu allocfree: " << GetAverage(tg2, repeats)*1000 << " ms" << endl; // show input image showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100) // show output image: first convert to interleaved opencv format from the layered raw array convert_layered_to_mat(mOut, imgOut); showImage("Output", mOut, 100+40, 100); // ### Display your own output images here as needed #ifdef CAMERA // end of camera loop } #else // wait for key inputs cv::waitKey(0); #endif // save input and result cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255] cv::imwrite("image_result.png",mOut*255.f); // free allocated arrays delete[] imgIn; delete[] imgOut; // close all opencv windows cvDestroyAllWindows(); return 0; }
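Both versions of this file depend on convert_mat_to_layered and convert_layered_to_mat from helper.h, which are not shown, to move between OpenCV's interleaved layout (rgb rgb rgb ...) and the layered layout (rrr... ggg... bbb...) that the kernel indexes as x + y*pw + c*h*pw. The helpers below are only a hedged guess at what such a conversion looks like for a tightly packed host buffer; they are not the actual helper.h implementation.

// Illustrative layout conversion for a w x h image with nc channels (host side).
#include <cstddef>

void interleavedToLayered(float *layered, const float *interleaved,
                          int w, int h, int nc) {
    for (int c = 0; c < nc; c++)
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                layered[x + (size_t)w * y + (size_t)w * h * c] =
                    interleaved[(x + (size_t)w * y) * nc + c];
}

void layeredToInterleaved(float *interleaved, const float *layered,
                          int w, int h, int nc) {
    for (int c = 0; c < nc; c++)
        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                interleaved[(x + (size_t)w * y) * nc + c] =
                    layered[x + (size_t)w * y + (size_t)w * h * c];
}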
a733c129364219e88530d4887952ee8de348b1c7.hip
// !!! This is a file automatically generated by hipify!!! //CUDA Library #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <rocblas.h> #include <hiprand/hiprand.h> //C++ Library #include <stdio.h> #include <cstdio> #include <iostream> #include <fstream> //Personal Library #include "headers.hpp" #include "smartptr.hpp" #include "storage.h" //OPENCV Library #include <opencv2/core/core.hpp> using namespace std; using namespace cv; //MACRO #define SIZEOF(width, height, typevariable) sizeof(typevariable)*(width*height) #define LOC(width, height, widthOfMat) (height*widthOfMat) + width #define POW(width, height) width * height //DEFINITION const int SIZE = 2<<27; //Prototype Function Declaration template <unsigned int convWidth, unsigned int widthofmat> __global__ void Map_Gather(int*, int*, int*); __host__ void map_gather(); void curand_function(); class image_config{ public: image_config() = delete; void read_image(string); private: }; class deepLearning{ public: void display(int width, int height, int* ptr); private: int* dMem = nullptr; int* OdMem = nullptr; int* OhMem = nullptr; int* ConvdMem = nullptr; int* ConvhMem = nullptr; int* hMem = nullptr; //TEST PARM int print; const int imageSize = 10; const int convSize = imageSize - 2; }; int main(int argc, char* argv[]) { try { // map_gather(); curand_function(); cout << "PROGRAM FINISHED" << endl; } catch (const errorExcept& excep) { cerr << "Error found at line " << excep.Line << " with error code " << excep.type << " and error type " << excep.type << endl;; return -1; } cin.get(); } template <unsigned int convWidth, unsigned int widthofmat> __global__ void Map_Gather(int* input, int* output, int* convlayer) { int loResult = 0; const int ix = threadIdx.x; const int iy = threadIdx.y; for (int i_y = 0; i_y < 3; ++i_y) { for (int i_x = 0; i_x < 3; ++i_x) { loResult += convlayer[LOC(i_x, i_y, 3)] * input[LOC((ix + i_x), (iy + i_y), widthofmat)]; } } output[LOC(ix, iy, convWidth)] = loResult; } __host__ void map_gather() { cout << "RUNNING 'Map and Gather Function'" << endl; const int imageSize = 10; const int convSize = imageSize - 2; int* dMem = nullptr; int* OdMem = nullptr; int* OhMem = nullptr; int* ConvdMem = nullptr; int* ConvhMem = nullptr; int* hMem = nullptr; cuda(hipMalloc(&dMem, SIZEOF(imageSize, imageSize, int))); cuda(hipMalloc(&OdMem, SIZEOF(convSize, convSize, int))); cuda(hipHostMalloc(&OhMem, SIZEOF(convSize, convSize, int), hipHostMallocDefault)); cuda(hipMalloc(&ConvdMem, SIZEOF(3, 3, int))); cuda(hipHostMalloc(&hMem, SIZEOF(imageSize, imageSize, int), hipHostMallocDefault)); cuda(hipHostMalloc(&ConvhMem, SIZEOF(3, 3, int), hipHostMallocDefault)); for (int i = 0; i < POW(imageSize, imageSize); ++i) { hMem[i] = 0; } hMem[LOC(1, 1, imageSize)] = 1; hMem[LOC(1, 2, imageSize)] = 1; hMem[LOC(1, 3, imageSize)] = 1; hMem[LOC(5, 1, imageSize)] = 1; hMem[LOC(4, 2, imageSize)] = 1; hMem[LOC(3, 3, imageSize)] = 1; ConvhMem[0] = 0; ConvhMem[1] = 0; ConvhMem[2] = 9; ConvhMem[3] = 0; ConvhMem[4] = 9; ConvhMem[5] = 0; ConvhMem[6] = 9; ConvhMem[7] = 0; ConvhMem[8] = 0; cout << "IMAGE LAYER" << endl; // display(imageSize, imageSize, hMem); cout << "CONV LAYER" << endl; // display(3, 3, ConvhMem); cuda(hipMemcpy(dMem, hMem, SIZEOF(imageSize, imageSize, int), hipMemcpyHostToDevice)); cuda(hipMemcpy(ConvdMem, ConvhMem, SIZEOF(3, 3, int), hipMemcpyHostToDevice)); dim3 threads(convSize, convSize); hipLaunchKernelGGL(( Map_Gather<convSize, imageSize>), dim3(1), dim3(threads), 0, 0, dMem, OdMem, ConvdMem); 
cuda(hipMemcpy(OhMem, OdMem, SIZEOF(convSize, convSize, int), hipMemcpyDeviceToHost)); cout << "ACTIVATION LAYER 1" << endl; // display(convSize, convSize, OhMem); cuda(hipFree(dMem)); cuda(hipFree(ConvdMem)); cuda(hipFree(OdMem)); cuda(hipHostFree(OhMem)); cuda(hipHostFree(hMem)); cuda(hipHostFree(ConvhMem)); dMem = nullptr; OdMem = nullptr; OhMem = nullptr; ConvdMem = nullptr; ConvhMem = nullptr; hMem = nullptr; } void curand_function() { //Declare CURAND Generator hiprandGenerator_t generator; //Create CURAND Generator crand(hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT)); //Set CURAND Seed crand(hiprandSetPseudoRandomGeneratorSeed(generator, 8888LL)); //Declare GPU MEM unsigned int* memNumber = nullptr; //Declare CPU MEM ptr::uPtr<unsigned int> cpuMem(new unsigned int[SIZE]); cuda(hipMalloc(&memNumber, sizeof(int) * SIZE)); crand(hiprandGenerate(generator, memNumber, SIZE)); cuda(hipMemcpy(*cpuMem, memNumber, sizeof(unsigned int)* SIZE, hipMemcpyDeviceToHost)); storage st(cpuMem, SIZE); st.write_to_file("FILE"); st.read_from_file("FILE"); st.print(SIZE); //FREE MEMORY crand(hiprandDestroyGenerator(generator)); cuda(hipFree(memNumber)); //Initialize To NULLls generator = nullptr; memNumber = nullptr; } void image_config::read_image(string fileName = "") { Mat image; } void deepLearning::display(int width, int height, int* ptr) { for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { cout << ptr[LOC(x, y, width)] << ","; } cout << endl; } }
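curand_function above drives the host-side cuRAND generator API through the project's cuda()/crand() check wrappers (presumably defined in headers.hpp) and a ptr::uPtr smart pointer from smartptr.hpp. Stripped of those project-specific pieces, the same generator flow looks roughly like this (error checking omitted; link with -lcurand):

// Minimal host-API cuRAND sketch: seed a generator, fill a device buffer with
// 32-bit random integers, and copy them back to the host.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand.h>

int main() {
    const size_t n = 1 << 20;

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 8888ULL);

    unsigned int *d_nums = nullptr;
    cudaMalloc(&d_nums, n * sizeof(unsigned int));
    curandGenerate(gen, d_nums, n);                 // n pseudo-random 32-bit values

    unsigned int *h_nums = new unsigned int[n];
    cudaMemcpy(h_nums, d_nums, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("first values: %u %u %u\n", h_nums[0], h_nums[1], h_nums[2]);

    curandDestroyGenerator(gen);
    cudaFree(d_nums);
    delete[] h_nums;
    return 0;
}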
a733c129364219e88530d4887952ee8de348b1c7.cu
//CUDA Library #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cublas_v2.h> #include <curand.h> //C++ Library #include <stdio.h> #include <cstdio> #include <iostream> #include <fstream> //Personal Library #include "headers.hpp" #include "smartptr.hpp" #include "storage.h" //OPENCV Library #include <opencv2/core/core.hpp> using namespace std; using namespace cv; //MACRO #define SIZEOF(width, height, typevariable) sizeof(typevariable)*(width*height) #define LOC(width, height, widthOfMat) (height*widthOfMat) + width #define POW(width, height) width * height //DEFINITION const int SIZE = 2<<27; //Prototype Function Declaration template <unsigned int convWidth, unsigned int widthofmat> __global__ void Map_Gather(int*, int*, int*); __host__ void map_gather(); void curand_function(); class image_config{ public: image_config() = delete; void read_image(string); private: }; class deepLearning{ public: void display(int width, int height, int* ptr); private: int* dMem = nullptr; int* OdMem = nullptr; int* OhMem = nullptr; int* ConvdMem = nullptr; int* ConvhMem = nullptr; int* hMem = nullptr; //TEST PARM int print; const int imageSize = 10; const int convSize = imageSize - 2; }; int main(int argc, char* argv[]) { try { // map_gather(); curand_function(); cout << "PROGRAM FINISHED" << endl; } catch (const errorExcept& excep) { cerr << "Error found at line " << excep.Line << " with error code " << excep.type << " and error type " << excep.type << endl;; return -1; } cin.get(); } template <unsigned int convWidth, unsigned int widthofmat> __global__ void Map_Gather(int* input, int* output, int* convlayer) { int loResult = 0; const int ix = threadIdx.x; const int iy = threadIdx.y; for (int i_y = 0; i_y < 3; ++i_y) { for (int i_x = 0; i_x < 3; ++i_x) { loResult += convlayer[LOC(i_x, i_y, 3)] * input[LOC((ix + i_x), (iy + i_y), widthofmat)]; } } output[LOC(ix, iy, convWidth)] = loResult; } __host__ void map_gather() { cout << "RUNNING 'Map and Gather Function'" << endl; const int imageSize = 10; const int convSize = imageSize - 2; int* dMem = nullptr; int* OdMem = nullptr; int* OhMem = nullptr; int* ConvdMem = nullptr; int* ConvhMem = nullptr; int* hMem = nullptr; cuda(cudaMalloc(&dMem, SIZEOF(imageSize, imageSize, int))); cuda(cudaMalloc(&OdMem, SIZEOF(convSize, convSize, int))); cuda(cudaHostAlloc(&OhMem, SIZEOF(convSize, convSize, int), cudaHostAllocDefault)); cuda(cudaMalloc(&ConvdMem, SIZEOF(3, 3, int))); cuda(cudaHostAlloc(&hMem, SIZEOF(imageSize, imageSize, int), cudaHostAllocDefault)); cuda(cudaHostAlloc(&ConvhMem, SIZEOF(3, 3, int), cudaHostAllocDefault)); for (int i = 0; i < POW(imageSize, imageSize); ++i) { hMem[i] = 0; } hMem[LOC(1, 1, imageSize)] = 1; hMem[LOC(1, 2, imageSize)] = 1; hMem[LOC(1, 3, imageSize)] = 1; hMem[LOC(5, 1, imageSize)] = 1; hMem[LOC(4, 2, imageSize)] = 1; hMem[LOC(3, 3, imageSize)] = 1; ConvhMem[0] = 0; ConvhMem[1] = 0; ConvhMem[2] = 9; ConvhMem[3] = 0; ConvhMem[4] = 9; ConvhMem[5] = 0; ConvhMem[6] = 9; ConvhMem[7] = 0; ConvhMem[8] = 0; cout << "IMAGE LAYER" << endl; // display(imageSize, imageSize, hMem); cout << "CONV LAYER" << endl; // display(3, 3, ConvhMem); cuda(cudaMemcpy(dMem, hMem, SIZEOF(imageSize, imageSize, int), cudaMemcpyHostToDevice)); cuda(cudaMemcpy(ConvdMem, ConvhMem, SIZEOF(3, 3, int), cudaMemcpyHostToDevice)); dim3 threads(convSize, convSize); Map_Gather<convSize, imageSize><<< 1, threads>>>(dMem, OdMem, ConvdMem); cuda(cudaMemcpy(OhMem, OdMem, SIZEOF(convSize, convSize, int), cudaMemcpyDeviceToHost)); cout << "ACTIVATION LAYER 
1" << endl; // display(convSize, convSize, OhMem); cuda(cudaFree(dMem)); cuda(cudaFree(ConvdMem)); cuda(cudaFree(OdMem)); cuda(cudaFreeHost(OhMem)); cuda(cudaFreeHost(hMem)); cuda(cudaFreeHost(ConvhMem)); dMem = nullptr; OdMem = nullptr; OhMem = nullptr; ConvdMem = nullptr; ConvhMem = nullptr; hMem = nullptr; } void curand_function() { //Declare CURAND Generator curandGenerator_t generator; //Create CURAND Generator crand(curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT)); //Set CURAND Seed crand(curandSetPseudoRandomGeneratorSeed(generator, 8888LL)); //Declare GPU MEM unsigned int* memNumber = nullptr; //Declare CPU MEM ptr::uPtr<unsigned int> cpuMem(new unsigned int[SIZE]); cuda(cudaMalloc(&memNumber, sizeof(int) * SIZE)); crand(curandGenerate(generator, memNumber, SIZE)); cuda(cudaMemcpy(*cpuMem, memNumber, sizeof(unsigned int)* SIZE, cudaMemcpyDeviceToHost)); storage st(cpuMem, SIZE); st.write_to_file("FILE"); st.read_from_file("FILE"); st.print(SIZE); //FREE MEMORY crand(curandDestroyGenerator(generator)); cuda(cudaFree(memNumber)); //Initialize To NULLls generator = nullptr; memNumber = nullptr; } void image_config::read_image(string fileName = "") { Mat image; } void deepLearning::display(int width, int height, int* ptr) { for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { cout << ptr[LOC(x, y, width)] << ","; } cout << endl; } }
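The Map_Gather kernel computes, for each output cell, the sum of elementwise products between the 3x3 filter and the corresponding window of the input, i.e. a valid (no-padding) 3x3 correlation that shrinks an imageSize x imageSize input to a convSize x convSize output. A small CPU reference of the same computation, handy for checking the GPU output, could look like the following; map_gather_cpu is a hypothetical helper and not part of the file.

// Hypothetical CPU reference for Map_Gather: same row-major indexing as the
// LOC(width, height, widthOfMat) macro above, i.e. index = y * matrix_width + x.
void map_gather_cpu(const int *input, int *output, const int *conv,
                    int imageSize, int convSize) {
    for (int oy = 0; oy < convSize; ++oy) {
        for (int ox = 0; ox < convSize; ++ox) {
            int acc = 0;
            for (int ky = 0; ky < 3; ++ky)
                for (int kx = 0; kx < 3; ++kx)
                    acc += conv[ky * 3 + kx] * input[(oy + ky) * imageSize + (ox + kx)];
            output[oy * convSize + ox] = acc;
        }
    }
}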
f3c989277653061092a32d3223cf00e3a99aecb4.hip
// !!! This is a file automatically generated by hipify!!! // // Created by mrjakobdk on 5/26/20. // #include <cmath> #include "../clustering/ClusteringCpu.h" #include "../../structures/ScyTreeNode.h" #include "../../structures/ScyTreeArray.h" #include "../../utils/util.h" #include "../../utils/TmpMalloc.cuh" #include "InscyCompare.cuh" #include <map> #include <vector> using namespace std; bool compare_arrays(int *array_1, int *array_2, int n) { bool identical = true; for (int i = 0; i < n; i++) { if (array_1[i] != array_2[i]) identical = false; } return identical; } void pairsort(int a[], int b[], const int n) { //https://www.geeksforgeeks.org/sorting-array-according-another-array-using-pair-stl/ pair<int, int> *pairt = new pair<int, int>[n]; // Storing the respective array // elements in pairs. for (int i = 0; i < n; i++) { pairt[i].first = a[i]; pairt[i].second = b[i]; } // Sorting the pair array. sort(pairt, pairt + n); // Modifying original arrays for (int i = 0; i < n; i++) { a[i] = pairt[i].first; b[i] = pairt[i].second; } } void compare(ScyTreeArray *scy_tree_1, ScyTreeArray *scy_tree_2) { //todo check parents, cells, counts, dims, restricted dims, dim start, points and points_placement if (scy_tree_1->number_of_nodes != scy_tree_2->number_of_nodes) { printf("number_of_nodes are not the same! %d and %d\n", scy_tree_1->number_of_nodes, scy_tree_2->number_of_nodes); printf("Parents:\n"); print_array(scy_tree_1->h_parents, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_parents, scy_tree_2->number_of_nodes); printf("Cells:\n"); print_array(scy_tree_1->h_cells, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_cells, scy_tree_2->number_of_nodes); printf("Counts:\n"); print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); throw 20; return; } if (scy_tree_1->number_of_cells != scy_tree_2->number_of_cells) { printf("number_of_cells are not the same! %d and %d\n", scy_tree_1->number_of_cells, scy_tree_2->number_of_cells); throw 20; return; } if (scy_tree_1->number_of_points != scy_tree_2->number_of_points) { printf("number_of_points are not the same! %d and %d\n", scy_tree_1->number_of_points, scy_tree_2->number_of_points); printf("Counts:\n"); print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); printf("Points:\n"); print_array(scy_tree_1->h_points, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points, scy_tree_2->number_of_points); printf("Placement:\n"); print_array(scy_tree_1->h_points_placement, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points_placement, scy_tree_2->number_of_points); throw 20; return; } if (scy_tree_1->number_of_dims != scy_tree_2->number_of_dims) { printf("number_of_dims are not the same! %d and %d\n", scy_tree_1->number_of_dims, scy_tree_2->number_of_dims); throw 20; return; } if (scy_tree_1->number_of_restricted_dims != scy_tree_2->number_of_restricted_dims) { printf("number_of_restricted_dims are not the same! %d and %d\n", scy_tree_1->number_of_restricted_dims, scy_tree_2->number_of_restricted_dims); throw 20; return; } //todo is_s_connected failes at rnd if ((scy_tree_1->is_s_connected ? 1 : 0) != (scy_tree_2->is_s_connected ? 1 : 0)) { printf("is_s_connected are not the same! %s and %s\n", scy_tree_1->is_s_connected ? "true" : "false", scy_tree_2->is_s_connected ? 
"true" : "false"); throw 20; return; } int sum_1 = 0; int sum_2 = 0; for (int i = 0; i < scy_tree_1->number_of_nodes; i++) { sum_1 += scy_tree_1->h_counts[i]; sum_2 += scy_tree_2->h_counts[i]; // if (scy_tree_1->h_parents[i] != scy_tree_2->h_parents[i]) { // printf("h_parents are not the same! differ at %d\n", i); // print_array(scy_tree_1->h_parents, scy_tree_1->number_of_nodes); // print_array(scy_tree_2->h_parents, scy_tree_2->number_of_nodes); // throw 20; // return; // } // if (scy_tree_1->h_cells[i] != scy_tree_2->h_cells[i]) { // printf("h_cells are not the same! differ at %d\n", i); // print_array(scy_tree_1->h_cells, scy_tree_1->number_of_nodes); // print_array(scy_tree_2->h_cells, scy_tree_2->number_of_nodes); // // scy_tree_2->copy_to_device(); // // print_array_gpu<<<1, 1>>>(scy_tree_1->d_cells, scy_tree_1->number_of_nodes); // hipDeviceSynchronize(); // print_array_gpu<<<1, 1>>>(scy_tree_2->d_cells, scy_tree_2->number_of_nodes); // hipDeviceSynchronize(); // // throw 20; // return; // } // if (scy_tree_1->h_counts[i] != scy_tree_2->h_counts[i]) { // printf("h_counts are not the same! differ at %d\n", i); // print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); // print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); // throw 20; // return; // } } if (sum_1 != sum_2) { printf("h_counts are not the same! sum_1:%d, sum_2:%d\n", sum_1, sum_2); print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); throw 20; return; } for (int i = 0; i < scy_tree_1->number_of_dims; i++) { if (scy_tree_1->h_dims[i] != scy_tree_2->h_dims[i]) { printf("h_dims are not the same! differ at %d\n", i); print_array(scy_tree_1->h_dims, scy_tree_1->number_of_dims); print_array(scy_tree_2->h_dims, scy_tree_2->number_of_dims); throw 20; return; } if (scy_tree_1->h_dim_start[i] != scy_tree_2->h_dim_start[i]) { printf("h_dim_start are not the same! differ at %d\n", i); print_array(scy_tree_1->h_dim_start, scy_tree_1->number_of_dims); print_array(scy_tree_2->h_dim_start, scy_tree_2->number_of_dims); throw 20; return; } } for (int i = 0; i < scy_tree_1->number_of_restricted_dims; i++) { if (scy_tree_1->h_restricted_dims[i] != scy_tree_2->h_restricted_dims[i]) { printf("h_restricted_dims are not the same! differ at %d\n", i); print_array(scy_tree_1->h_restricted_dims, scy_tree_1->number_of_restricted_dims); print_array(scy_tree_2->h_restricted_dims, scy_tree_2->number_of_restricted_dims); throw 20; return; } } pairsort(scy_tree_1->h_points, scy_tree_1->h_points_placement, scy_tree_1->number_of_points); pairsort(scy_tree_2->h_points, scy_tree_2->h_points_placement, scy_tree_2->number_of_points); for (int i = 0; i < scy_tree_1->number_of_points; i++) { if (scy_tree_1->h_points[i] != scy_tree_2->h_points[i]) { printf("h_points are not the same! differ at %d\n", i); print_array(scy_tree_1->h_points, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points, scy_tree_2->number_of_points); printf("Placement:\n"); print_array(scy_tree_1->h_points_placement, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points_placement, scy_tree_2->number_of_points); throw 20; return; } // if (scy_tree_1->h_points_placement[i] != scy_tree_2->h_points_placement[i]) { // printf("h_points_placement are not the same! 
differ at %d\n", i); // print_array(scy_tree_1->h_points_placement, scy_tree_1->number_of_points); // print_array(scy_tree_2->h_points_placement, scy_tree_2->number_of_points); // printf("Points:\n"); // print_array(scy_tree_1->h_points, scy_tree_1->number_of_points); // print_array(scy_tree_2->h_points, scy_tree_2->number_of_points); // throw 20; // return; // } } // printf("Success!\n"); } void INSCYCompare(ScyTreeNode *scy_tree, ScyTreeNode *neighborhood_tree, at::Tensor X, int n, float neighborhood_size, float F, int num_obj, int min_size, map <vector<int>, vector<int>, vec_cmp> &result, int first_dim_no, int d, int &calls) { // printf("call: %d, first_dim_no: %d, points: %d\n", calls, first_dim_no, scy_tree->number_of_points); // scy_tree->print(); ScyTreeArray *scy_tree_gpu = scy_tree->convert_to_ScyTreeArray(); scy_tree_gpu->copy_to_device(); // if(d - first_dim_no != scy_tree_gpu->number_of_dims) { // printf("not the same!!! %d vs %d\n", d - first_dim_no, scy_tree_gpu->number_of_dims); // } vector <vector<ScyTreeArray *>> L = scy_tree_gpu->restrict_gpu_multi(first_dim_no, d - first_dim_no, scy_tree_gpu->number_of_cells); TmpMalloc *tmps = new TmpMalloc(); vector <vector<ScyTreeArray *>> L_merged = scy_tree_gpu->restrict_merge_gpu_multi2(tmps, first_dim_no, d - first_dim_no, scy_tree_gpu->number_of_cells); int dim_no = first_dim_no; calls++; while (dim_no < d) { int cell_no = 0; int i = dim_no - first_dim_no; vector<int> subspace_clustering(n, -1); vector<int> subspace; int count = 0; while (cell_no < scy_tree->number_of_cells) { //restricted-tree := restrict(scy-tree, descriptor); ScyTreeNode *restricted_scy_tree = scy_tree->restrict(dim_no, cell_no); ScyTreeArray *restricted_scy_tree_conv = restricted_scy_tree->convert_to_ScyTreeArray(); ScyTreeArray *restricted_scy_tree_gpu = scy_tree_gpu->restrict_gpu(dim_no, cell_no); restricted_scy_tree_gpu->copy_to_host(); // ScyTreeArray *restricted_scy_tree_gpu_3 = scy_tree_gpu->restrict3_gpu(dim_no, cell_no); // restricted_scy_tree_gpu_3->copy_to_host(); ScyTreeArray *restricted_scy_tree_gpu_multi = L[i][cell_no]; restricted_scy_tree_gpu_multi->copy_to_host(); subspace = vector<int>(restricted_scy_tree->restricted_dims, restricted_scy_tree->restricted_dims + restricted_scy_tree->number_of_restricted_dims); // printf("After restrict:\n"); compare(restricted_scy_tree_gpu, restricted_scy_tree_conv); compare(restricted_scy_tree_gpu, restricted_scy_tree_gpu_multi); delete restricted_scy_tree_gpu_multi; //restricted-tree := mergeWithNeighbors(restricted-tree); //updates cell_no if merged with neighbors int cell_no_gpu = cell_no; restricted_scy_tree->mergeWithNeighbors(scy_tree, dim_no, cell_no); restricted_scy_tree_conv = restricted_scy_tree->convert_to_ScyTreeArray(); restricted_scy_tree_gpu = restricted_scy_tree_gpu->mergeWithNeighbors_gpu1(scy_tree_gpu, dim_no, cell_no_gpu); restricted_scy_tree_gpu->copy_to_host(); ScyTreeArray *merge_scy_tree_gpu_multi = L_merged[i][count]; merge_scy_tree_gpu_multi->copy_to_host(); // printf("After merge:\n"); compare(restricted_scy_tree_gpu, restricted_scy_tree_conv); compare(restricted_scy_tree_gpu, merge_scy_tree_gpu_multi); // printf("After merge:\n"); //pruneRecursion(restricted-tree); //prune sparse regions if (restricted_scy_tree->pruneRecursion(min_size, neighborhood_tree, X, neighborhood_size, restricted_scy_tree->restricted_dims, restricted_scy_tree->number_of_restricted_dims, F, num_obj, n, d)) { //INSCY(restricted-tree,result); //depth-first via recursion map <vector<int>, vector<int>, vec_cmp> 
sub_result; INSCYCompare(restricted_scy_tree, neighborhood_tree, X, n, neighborhood_size, F, num_obj, min_size, sub_result, dim_no + 1, d, calls); result.insert(sub_result.begin(), sub_result.end()); //pruneRedundancy(restricted-tree); //in-process-removal restricted_scy_tree->pruneRedundancy(0.5, sub_result);//todo does nothing atm //result := DBClustering(restricted-tree) result; int idx = restricted_scy_tree->get_dims_idx(); INSCYClusteringImplCPU(restricted_scy_tree, neighborhood_tree, X, n, neighborhood_size, F, num_obj, subspace_clustering, min_size, 0.5, result); // if (result.count(idx)) { // vector<int> clustering = result[idx]; // int m = v_max(clustering); // if (m < 0) { // result[idx] = new_clustering; // } else { // for (int i = 0; i < n; i++) { // if (new_clustering[i] == -2) { // clustering[i] = new_clustering[i]; // } else if (new_clustering[i] >= 0) { // clustering[i] = m + 1 + new_clustering[i]; // } // } // result[idx] = clustering; // } // } else { // result.insert(pair < int, vector < int >> (idx, new_clustering)); // } } count++; cell_no++; } result.insert(pair < vector < int > , vector < int >> (subspace, subspace_clustering)); dim_no++; } int total_inscy = pow(2, d); printf("CPU-INSCY(%d): %d%% \r", calls, int((result.size() * 100) / total_inscy)); }
f3c989277653061092a32d3223cf00e3a99aecb4.cu
// // Created by mrjakobdk on 5/26/20. // #include <cmath> #include "../clustering/ClusteringCpu.h" #include "../../structures/ScyTreeNode.h" #include "../../structures/ScyTreeArray.h" #include "../../utils/util.h" #include "../../utils/TmpMalloc.cuh" #include "InscyCompare.cuh" #include <map> #include <vector> using namespace std; bool compare_arrays(int *array_1, int *array_2, int n) { bool identical = true; for (int i = 0; i < n; i++) { if (array_1[i] != array_2[i]) identical = false; } return identical; } void pairsort(int a[], int b[], const int n) { //https://www.geeksforgeeks.org/sorting-array-according-another-array-using-pair-stl/ pair<int, int> *pairt = new pair<int, int>[n]; // Storing the respective array // elements in pairs. for (int i = 0; i < n; i++) { pairt[i].first = a[i]; pairt[i].second = b[i]; } // Sorting the pair array. sort(pairt, pairt + n); // Modifying original arrays for (int i = 0; i < n; i++) { a[i] = pairt[i].first; b[i] = pairt[i].second; } } void compare(ScyTreeArray *scy_tree_1, ScyTreeArray *scy_tree_2) { //todo check parents, cells, counts, dims, restricted dims, dim start, points and points_placement if (scy_tree_1->number_of_nodes != scy_tree_2->number_of_nodes) { printf("number_of_nodes are not the same! %d and %d\n", scy_tree_1->number_of_nodes, scy_tree_2->number_of_nodes); printf("Parents:\n"); print_array(scy_tree_1->h_parents, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_parents, scy_tree_2->number_of_nodes); printf("Cells:\n"); print_array(scy_tree_1->h_cells, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_cells, scy_tree_2->number_of_nodes); printf("Counts:\n"); print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); throw 20; return; } if (scy_tree_1->number_of_cells != scy_tree_2->number_of_cells) { printf("number_of_cells are not the same! %d and %d\n", scy_tree_1->number_of_cells, scy_tree_2->number_of_cells); throw 20; return; } if (scy_tree_1->number_of_points != scy_tree_2->number_of_points) { printf("number_of_points are not the same! %d and %d\n", scy_tree_1->number_of_points, scy_tree_2->number_of_points); printf("Counts:\n"); print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); printf("Points:\n"); print_array(scy_tree_1->h_points, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points, scy_tree_2->number_of_points); printf("Placement:\n"); print_array(scy_tree_1->h_points_placement, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points_placement, scy_tree_2->number_of_points); throw 20; return; } if (scy_tree_1->number_of_dims != scy_tree_2->number_of_dims) { printf("number_of_dims are not the same! %d and %d\n", scy_tree_1->number_of_dims, scy_tree_2->number_of_dims); throw 20; return; } if (scy_tree_1->number_of_restricted_dims != scy_tree_2->number_of_restricted_dims) { printf("number_of_restricted_dims are not the same! %d and %d\n", scy_tree_1->number_of_restricted_dims, scy_tree_2->number_of_restricted_dims); throw 20; return; } //todo is_s_connected failes at rnd if ((scy_tree_1->is_s_connected ? 1 : 0) != (scy_tree_2->is_s_connected ? 1 : 0)) { printf("is_s_connected are not the same! %s and %s\n", scy_tree_1->is_s_connected ? "true" : "false", scy_tree_2->is_s_connected ? 
"true" : "false"); throw 20; return; } int sum_1 = 0; int sum_2 = 0; for (int i = 0; i < scy_tree_1->number_of_nodes; i++) { sum_1 += scy_tree_1->h_counts[i]; sum_2 += scy_tree_2->h_counts[i]; // if (scy_tree_1->h_parents[i] != scy_tree_2->h_parents[i]) { // printf("h_parents are not the same! differ at %d\n", i); // print_array(scy_tree_1->h_parents, scy_tree_1->number_of_nodes); // print_array(scy_tree_2->h_parents, scy_tree_2->number_of_nodes); // throw 20; // return; // } // if (scy_tree_1->h_cells[i] != scy_tree_2->h_cells[i]) { // printf("h_cells are not the same! differ at %d\n", i); // print_array(scy_tree_1->h_cells, scy_tree_1->number_of_nodes); // print_array(scy_tree_2->h_cells, scy_tree_2->number_of_nodes); // // scy_tree_2->copy_to_device(); // // print_array_gpu<<<1, 1>>>(scy_tree_1->d_cells, scy_tree_1->number_of_nodes); // cudaDeviceSynchronize(); // print_array_gpu<<<1, 1>>>(scy_tree_2->d_cells, scy_tree_2->number_of_nodes); // cudaDeviceSynchronize(); // // throw 20; // return; // } // if (scy_tree_1->h_counts[i] != scy_tree_2->h_counts[i]) { // printf("h_counts are not the same! differ at %d\n", i); // print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); // print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); // throw 20; // return; // } } if (sum_1 != sum_2) { printf("h_counts are not the same! sum_1:%d, sum_2:%d\n", sum_1, sum_2); print_array(scy_tree_1->h_counts, scy_tree_1->number_of_nodes); print_array(scy_tree_2->h_counts, scy_tree_2->number_of_nodes); throw 20; return; } for (int i = 0; i < scy_tree_1->number_of_dims; i++) { if (scy_tree_1->h_dims[i] != scy_tree_2->h_dims[i]) { printf("h_dims are not the same! differ at %d\n", i); print_array(scy_tree_1->h_dims, scy_tree_1->number_of_dims); print_array(scy_tree_2->h_dims, scy_tree_2->number_of_dims); throw 20; return; } if (scy_tree_1->h_dim_start[i] != scy_tree_2->h_dim_start[i]) { printf("h_dim_start are not the same! differ at %d\n", i); print_array(scy_tree_1->h_dim_start, scy_tree_1->number_of_dims); print_array(scy_tree_2->h_dim_start, scy_tree_2->number_of_dims); throw 20; return; } } for (int i = 0; i < scy_tree_1->number_of_restricted_dims; i++) { if (scy_tree_1->h_restricted_dims[i] != scy_tree_2->h_restricted_dims[i]) { printf("h_restricted_dims are not the same! differ at %d\n", i); print_array(scy_tree_1->h_restricted_dims, scy_tree_1->number_of_restricted_dims); print_array(scy_tree_2->h_restricted_dims, scy_tree_2->number_of_restricted_dims); throw 20; return; } } pairsort(scy_tree_1->h_points, scy_tree_1->h_points_placement, scy_tree_1->number_of_points); pairsort(scy_tree_2->h_points, scy_tree_2->h_points_placement, scy_tree_2->number_of_points); for (int i = 0; i < scy_tree_1->number_of_points; i++) { if (scy_tree_1->h_points[i] != scy_tree_2->h_points[i]) { printf("h_points are not the same! differ at %d\n", i); print_array(scy_tree_1->h_points, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points, scy_tree_2->number_of_points); printf("Placement:\n"); print_array(scy_tree_1->h_points_placement, scy_tree_1->number_of_points); print_array(scy_tree_2->h_points_placement, scy_tree_2->number_of_points); throw 20; return; } // if (scy_tree_1->h_points_placement[i] != scy_tree_2->h_points_placement[i]) { // printf("h_points_placement are not the same! 
differ at %d\n", i); // print_array(scy_tree_1->h_points_placement, scy_tree_1->number_of_points); // print_array(scy_tree_2->h_points_placement, scy_tree_2->number_of_points); // printf("Points:\n"); // print_array(scy_tree_1->h_points, scy_tree_1->number_of_points); // print_array(scy_tree_2->h_points, scy_tree_2->number_of_points); // throw 20; // return; // } } // printf("Success!\n"); } void INSCYCompare(ScyTreeNode *scy_tree, ScyTreeNode *neighborhood_tree, at::Tensor X, int n, float neighborhood_size, float F, int num_obj, int min_size, map <vector<int>, vector<int>, vec_cmp> &result, int first_dim_no, int d, int &calls) { // printf("call: %d, first_dim_no: %d, points: %d\n", calls, first_dim_no, scy_tree->number_of_points); // scy_tree->print(); ScyTreeArray *scy_tree_gpu = scy_tree->convert_to_ScyTreeArray(); scy_tree_gpu->copy_to_device(); // if(d - first_dim_no != scy_tree_gpu->number_of_dims) { // printf("not the same!!! %d vs %d\n", d - first_dim_no, scy_tree_gpu->number_of_dims); // } vector <vector<ScyTreeArray *>> L = scy_tree_gpu->restrict_gpu_multi(first_dim_no, d - first_dim_no, scy_tree_gpu->number_of_cells); TmpMalloc *tmps = new TmpMalloc(); vector <vector<ScyTreeArray *>> L_merged = scy_tree_gpu->restrict_merge_gpu_multi2(tmps, first_dim_no, d - first_dim_no, scy_tree_gpu->number_of_cells); int dim_no = first_dim_no; calls++; while (dim_no < d) { int cell_no = 0; int i = dim_no - first_dim_no; vector<int> subspace_clustering(n, -1); vector<int> subspace; int count = 0; while (cell_no < scy_tree->number_of_cells) { //restricted-tree := restrict(scy-tree, descriptor); ScyTreeNode *restricted_scy_tree = scy_tree->restrict(dim_no, cell_no); ScyTreeArray *restricted_scy_tree_conv = restricted_scy_tree->convert_to_ScyTreeArray(); ScyTreeArray *restricted_scy_tree_gpu = scy_tree_gpu->restrict_gpu(dim_no, cell_no); restricted_scy_tree_gpu->copy_to_host(); // ScyTreeArray *restricted_scy_tree_gpu_3 = scy_tree_gpu->restrict3_gpu(dim_no, cell_no); // restricted_scy_tree_gpu_3->copy_to_host(); ScyTreeArray *restricted_scy_tree_gpu_multi = L[i][cell_no]; restricted_scy_tree_gpu_multi->copy_to_host(); subspace = vector<int>(restricted_scy_tree->restricted_dims, restricted_scy_tree->restricted_dims + restricted_scy_tree->number_of_restricted_dims); // printf("After restrict:\n"); compare(restricted_scy_tree_gpu, restricted_scy_tree_conv); compare(restricted_scy_tree_gpu, restricted_scy_tree_gpu_multi); delete restricted_scy_tree_gpu_multi; //restricted-tree := mergeWithNeighbors(restricted-tree); //updates cell_no if merged with neighbors int cell_no_gpu = cell_no; restricted_scy_tree->mergeWithNeighbors(scy_tree, dim_no, cell_no); restricted_scy_tree_conv = restricted_scy_tree->convert_to_ScyTreeArray(); restricted_scy_tree_gpu = restricted_scy_tree_gpu->mergeWithNeighbors_gpu1(scy_tree_gpu, dim_no, cell_no_gpu); restricted_scy_tree_gpu->copy_to_host(); ScyTreeArray *merge_scy_tree_gpu_multi = L_merged[i][count]; merge_scy_tree_gpu_multi->copy_to_host(); // printf("After merge:\n"); compare(restricted_scy_tree_gpu, restricted_scy_tree_conv); compare(restricted_scy_tree_gpu, merge_scy_tree_gpu_multi); // printf("After merge:\n"); //pruneRecursion(restricted-tree); //prune sparse regions if (restricted_scy_tree->pruneRecursion(min_size, neighborhood_tree, X, neighborhood_size, restricted_scy_tree->restricted_dims, restricted_scy_tree->number_of_restricted_dims, F, num_obj, n, d)) { //INSCY(restricted-tree,result); //depth-first via recursion map <vector<int>, vector<int>, vec_cmp> 
sub_result; INSCYCompare(restricted_scy_tree, neighborhood_tree, X, n, neighborhood_size, F, num_obj, min_size, sub_result, dim_no + 1, d, calls); result.insert(sub_result.begin(), sub_result.end()); //pruneRedundancy(restricted-tree); //in-process-removal restricted_scy_tree->pruneRedundancy(0.5, sub_result);//todo does nothing atm //result := DBClustering(restricted-tree) ∪ result; int idx = restricted_scy_tree->get_dims_idx(); INSCYClusteringImplCPU(restricted_scy_tree, neighborhood_tree, X, n, neighborhood_size, F, num_obj, subspace_clustering, min_size, 0.5, result); // if (result.count(idx)) { // vector<int> clustering = result[idx]; // int m = v_max(clustering); // if (m < 0) { // result[idx] = new_clustering; // } else { // for (int i = 0; i < n; i++) { // if (new_clustering[i] == -2) { // clustering[i] = new_clustering[i]; // } else if (new_clustering[i] >= 0) { // clustering[i] = m + 1 + new_clustering[i]; // } // } // result[idx] = clustering; // } // } else { // result.insert(pair < int, vector < int >> (idx, new_clustering)); // } } count++; cell_no++; } result.insert(pair < vector < int > , vector < int >> (subspace, subspace_clustering)); dim_no++; } int total_inscy = pow(2, d); printf("CPU-INSCY(%d): %d%% \r", calls, int((result.size() * 100) / total_inscy)); }
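INSCYCompare labels each restricted subspace with restricted_scy_tree->get_dims_idx() and reports progress against total_inscy = pow(2, d), which suggests that every subset of the d dimensions maps to an integer in [0, 2^d). get_dims_idx() itself is defined elsewhere in ScyTreeNode, so the bitmask encoding below is only an assumption about that mapping, shown for illustration.

// Assumed subspace-to-index mapping: one bit per restricted dimension.
#include <cstdio>
#include <vector>

int dimsToIndex(const std::vector<int> &restricted_dims) {
    int idx = 0;
    for (int dim : restricted_dims)
        idx |= (1 << dim);                     // e.g. dims {0, 2} -> binary 101 -> 5
    return idx;
}

int main() {
    std::vector<int> subspace = {0, 2};
    printf("subspace index: %d\n", dimsToIndex(subspace));   // prints 5
    return 0;
}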
6e8220120f4df4d27cf0997664c84bca30ba1e19.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <cstdlib>

using namespace std;
using namespace std::chrono;

__global__ void reduce(float *g_idata, float *g_odata){
    extern __shared__ float sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for(unsigned int s = 1; s < blockDim.x; s *= 2){
        if(tid % (2 * s) == 0){
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

void sum_CPU(float *host_input, float *host_output, unsigned int size){
    double result = 0.0f;
    auto start = high_resolution_clock::now();
    for(int i = 0; i < size; i++){
        result += host_input[i];
    }
    auto stop = high_resolution_clock::now();
    auto time_req = duration_cast<microseconds>(stop - start).count();
    cout << endl << "Time required for CPU : " << time_req << " microseconds " << endl;
    cout << endl << " Sum from CPU : " << result << endl;
}

void compute_sum_cpu(int *cpu_input, int *cpu_output, unsigned int n){
    for(unsigned int i = 0; i < n; i++){
        cpu_output[0] += cpu_input[i];
    }
}

int main(){
    int maxThreads = 1024;
    float *host_input, *host_output, *device_input, *device_output;
    float *cpu_input;
    float *cpu_output;

    int n = 2 << 29;
    size_t size = n * sizeof(float);

    // CPU sum
    cpu_input = (float *)malloc(size);            // room for all n input values
    cpu_output = (float *)malloc(sizeof(float));
    cpu_output[0] = 0.0f;
    for(unsigned int i = 0; i < n; i++){
        cpu_input[i] = 1.0f;
    }
    sum_CPU(cpu_input, cpu_output, n);

    host_input = (float *)malloc(size);
    for(int i = 0; i < n; i++){
        host_input[i] = 1;
    }

    int blocks = n / maxThreads;
    host_output = (float *)malloc(blocks * sizeof(float));

    const dim3 block_size(maxThreads, 1, 1);
    const dim3 grid_size(blocks, 1, 1);

    hipMalloc(&device_input, size);
    hipMalloc(&device_output, blocks * sizeof(float));
    hipMemcpy(device_input, host_input, size, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( reduce), dim3(grid_size), dim3(block_size), maxThreads * sizeof(float), 0, device_input, device_output);

    hipMemcpy(host_output, device_output, blocks * sizeof(float), hipMemcpyDeviceToHost);

    for(int i = 1; i < blocks; i++){
        host_output[0] += host_output[i];
    }
    cout << endl << " Sum from GPU : " << *host_output << endl;
}
6e8220120f4df4d27cf0997664c84bca30ba1e19.cu
#include <iostream>
#include <chrono>
#include <cstdlib>

using namespace std;
using namespace std::chrono;

__global__ void reduce(float *g_idata, float *g_odata){
    extern __shared__ float sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for(unsigned int s = 1; s < blockDim.x; s *= 2){
        if(tid % (2 * s) == 0){
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

void sum_CPU(float *host_input, float *host_output, unsigned int size){
    double result = 0.0f;
    auto start = high_resolution_clock::now();
    for(int i = 0; i < size; i++){
        result += host_input[i];
    }
    auto stop = high_resolution_clock::now();
    auto time_req = duration_cast<microseconds>(stop - start).count();
    cout << endl << "Time required for CPU : " << time_req << " microseconds " << endl;
    cout << endl << " Sum from CPU : " << result << endl;
}

void compute_sum_cpu(int *cpu_input, int *cpu_output, unsigned int n){
    for(unsigned int i = 0; i < n; i++){
        cpu_output[0] += cpu_input[i];
    }
}

int main(){
    int maxThreads = 1024;
    float *host_input, *host_output, *device_input, *device_output;
    float *cpu_input;
    float *cpu_output;

    int n = 2 << 29;
    size_t size = n * sizeof(float);

    // CPU sum
    cpu_input = (float *)malloc(size);            // room for all n input values
    cpu_output = (float *)malloc(sizeof(float));
    cpu_output[0] = 0.0f;
    for(unsigned int i = 0; i < n; i++){
        cpu_input[i] = 1.0f;
    }
    sum_CPU(cpu_input, cpu_output, n);

    host_input = (float *)malloc(size);
    for(int i = 0; i < n; i++){
        host_input[i] = 1;
    }

    int blocks = n / maxThreads;
    host_output = (float *)malloc(blocks * sizeof(float));

    const dim3 block_size(maxThreads, 1, 1);
    const dim3 grid_size(blocks, 1, 1);

    cudaMalloc(&device_input, size);
    cudaMalloc(&device_output, blocks * sizeof(float));
    cudaMemcpy(device_input, host_input, size, cudaMemcpyHostToDevice);

    reduce<<<grid_size, block_size, maxThreads * sizeof(float)>>>(device_input, device_output);

    cudaMemcpy(host_output, device_output, blocks * sizeof(float), cudaMemcpyDeviceToHost);

    for(int i = 1; i < blocks; i++){
        host_output[0] += host_output[i];
    }
    cout << endl << " Sum from GPU : " << *host_output << endl;
}
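The reduce kernel in both versions uses interleaved addressing: the test tid % (2 * s) == 0 leaves most threads of every warp idle behind a divergent branch, and the active threads drift further apart as s grows. A commonly used alternative, not part of the original files, keeps the active threads contiguous by halving a stride that starts at blockDim.x / 2; it is a drop-in replacement for the kernel body (still one partial sum per block, with the usual power-of-two block size).

// Sequential-addressing variant of the block reduction above (sketch).
__global__ void reduce_seq(float *g_idata, float *g_odata) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();

    // halve the number of active, contiguous threads each step
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}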
dba74394343518bc56cf8b854744fa88c1d24a2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Indicesou may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { template <typename T, framework::DataLayout layout, bool HasBias> __global__ void KeAffineChannelCUDA(const T* x, const T* scale, const T* bias, const int C, const int HxW, const int num, T* y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C; if (HasBias) { y[i] = scale[c] * x[i] + bias[c]; } else { y[i] = scale[c] * x[i]; } } } template <typename DeviceContext, typename T> class AffineChannelCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* scale = ctx.Input<framework::Tensor>("Scale"); auto* bias = ctx.Input<framework::Tensor>("Bias"); auto* y = ctx.Output<framework::Tensor>("Out"); y->mutable_data<T>(ctx.GetPlace()); const framework::DataLayout layout = framework::StringToDataLayout(ctx.Attr<std::string>("data_layout")); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto dims = x->dims(); const int num = x->numel(); int N = dims[0]; int C = layout == framework::DataLayout::kNCHW ? 
dims[1] : dims[dims.size() - 1]; int HxW = num / N / C; const T* x_d = x->data<T>(); const T* scale_d = scale->data<T>(); const T* bias_d = bias->data<T>(); T* y_d = y->data<T>(); #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif // PADDLE_WITH_HIP int grid = (num + block - 1) / block; int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); grid = ::min(::max(max_threads / block, 1), grid); if (layout == framework::DataLayout::kNCHW) { hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNCHW, true>), dim3(grid), dim3(block), 0, dev_ctx.stream(), x_d, scale_d, bias_d, C, HxW, num, y_d); } else { hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNHWC, true>), dim3(grid), dim3(block), 0, dev_ctx.stream(), x_d, scale_d, bias_d, C, HxW, num, y_d); } } }; template <typename T, int BlockDim, framework::DataLayout layout> __global__ void AffineChannelScaleBiasGradientCUDAKernel( const T* dy, const T* x, const int N, const int C, const int HxW, T* dscale, T* dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<double, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_sum = 0; T db_sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += dy[index] * x[index]; db_sum += dy[index]; } __syncthreads(); auto ds_out = BlockReduce(ds_storage).Reduce(static_cast<double>(ds_sum), hipcub::Sum()); auto db_out = BlockReduce(db_storage).Reduce(static_cast<double>(db_sum), hipcub::Sum()); __syncthreads(); if (threadIdx.x == 0) { dscale[i] = ds_out; dbias[i] = db_out; } } } template <typename DeviceContext, typename T> class AffineChannelGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* scale = ctx.Input<framework::Tensor>("Scale"); auto* bias = ctx.Input<framework::Tensor>("Bias"); auto* dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<framework::Tensor>(framework::GradVarName("X")); auto* dscale = ctx.Output<framework::Tensor>(framework::GradVarName("Scale")); auto* dbias = ctx.Output<framework::Tensor>(framework::GradVarName("Bias")); const framework::DataLayout layout = framework::StringToDataLayout(ctx.Attr<std::string>("data_layout")); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto dims = dy->dims(); const int num = dy->numel(); int N = dims[0]; int C = layout == framework::DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1]; int HxW = num / N / C; const T* dy_d = dy->data<T>(); const T* s_d = scale->data<T>(); T* dx_d = dx ? dx->mutable_data<T>(ctx.GetPlace()) : nullptr; T* ds_d = dscale ? dscale->mutable_data<T>(ctx.GetPlace()) : nullptr; T* db_d = dbias ? 
dbias->mutable_data<T>(ctx.GetPlace()) : nullptr; #ifdef PADDLE_WITH_HIP const int block = 256; #else const int block = 1024; #endif // PADDLE_WITH_HIP int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = ::min(C, max_blocks); if (layout == framework::DataLayout::kNCHW) { if (dscale && dbias) { const T* x_d = x->data<T>(); hipLaunchKernelGGL(( AffineChannelScaleBiasGradientCUDAKernel< T, block, framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, dev_ctx.stream(), dy_d, x_d, N, C, HxW, ds_d, db_d); } if (dx) { hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNCHW, false>), dim3(grid1), dim3(block), 0, dev_ctx.stream(), dy_d, s_d, nullptr, C, HxW, num, dx_d); } } else { if (dscale && dbias) { const T* x_d = x->data<T>(); hipLaunchKernelGGL(( AffineChannelScaleBiasGradientCUDAKernel< T, block, framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, dev_ctx.stream(), dy_d, x_d, N, C, HxW, ds_d, db_d); } if (dx) { hipLaunchKernelGGL(( KeAffineChannelCUDA<T, framework::DataLayout::kNHWC, false>), dim3(grid1), dim3(block), 0, dev_ctx.stream(), dy_d, s_d, nullptr, C, HxW, num, dx_d); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(affine_channel, ops::AffineChannelCUDAKernel<CUDA, float>, ops::AffineChannelCUDAKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL(affine_channel_grad, ops::AffineChannelGradCUDAKernel<CUDA, float>, ops::AffineChannelGradCUDAKernel<CUDA, double>);
dba74394343518bc56cf8b854744fa88c1d24a2a.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Indicesou may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { template <typename T, framework::DataLayout layout, bool HasBias> __global__ void KeAffineChannelCUDA(const T* x, const T* scale, const T* bias, const int C, const int HxW, const int num, T* y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C; if (HasBias) { y[i] = scale[c] * x[i] + bias[c]; } else { y[i] = scale[c] * x[i]; } } } template <typename DeviceContext, typename T> class AffineChannelCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* scale = ctx.Input<framework::Tensor>("Scale"); auto* bias = ctx.Input<framework::Tensor>("Bias"); auto* y = ctx.Output<framework::Tensor>("Out"); y->mutable_data<T>(ctx.GetPlace()); const framework::DataLayout layout = framework::StringToDataLayout(ctx.Attr<std::string>("data_layout")); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto dims = x->dims(); const int num = x->numel(); int N = dims[0]; int C = layout == framework::DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1]; int HxW = num / N / C; const T* x_d = x->data<T>(); const T* scale_d = scale->data<T>(); const T* bias_d = bias->data<T>(); T* y_d = y->data<T>(); #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif // PADDLE_WITH_HIP int grid = (num + block - 1) / block; int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); grid = std::min(std::max(max_threads / block, 1), grid); if (layout == framework::DataLayout::kNCHW) { KeAffineChannelCUDA<T, framework::DataLayout::kNCHW, true><<<grid, block, 0, dev_ctx.stream()>>>( x_d, scale_d, bias_d, C, HxW, num, y_d); } else { KeAffineChannelCUDA<T, framework::DataLayout::kNHWC, true><<<grid, block, 0, dev_ctx.stream()>>>( x_d, scale_d, bias_d, C, HxW, num, y_d); } } }; template <typename T, int BlockDim, framework::DataLayout layout> __global__ void AffineChannelScaleBiasGradientCUDAKernel( const T* dy, const T* x, const int N, const int C, const int HxW, T* dscale, T* dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<double, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_sum = 0; T db_sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += dy[index] * x[index]; db_sum += dy[index]; } __syncthreads(); auto ds_out = BlockReduce(ds_storage).Reduce(static_cast<double>(ds_sum), cub::Sum()); auto db_out = BlockReduce(db_storage).Reduce(static_cast<double>(db_sum), cub::Sum()); __syncthreads(); if (threadIdx.x == 0) { dscale[i] = ds_out; dbias[i] = db_out; } } } template <typename DeviceContext, typename T> class AffineChannelGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* scale = ctx.Input<framework::Tensor>("Scale"); auto* bias = ctx.Input<framework::Tensor>("Bias"); auto* dy = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<framework::Tensor>(framework::GradVarName("X")); auto* dscale = ctx.Output<framework::Tensor>(framework::GradVarName("Scale")); auto* dbias = ctx.Output<framework::Tensor>(framework::GradVarName("Bias")); const framework::DataLayout layout = framework::StringToDataLayout(ctx.Attr<std::string>("data_layout")); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto dims = dy->dims(); const int num = dy->numel(); int N = dims[0]; int C = layout == framework::DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1]; int HxW = num / N / C; const T* dy_d = dy->data<T>(); const T* s_d = scale->data<T>(); T* dx_d = dx ? dx->mutable_data<T>(ctx.GetPlace()) : nullptr; T* ds_d = dscale ? dscale->mutable_data<T>(ctx.GetPlace()) : nullptr; T* db_d = dbias ? dbias->mutable_data<T>(ctx.GetPlace()) : nullptr; #ifdef PADDLE_WITH_HIP const int block = 256; #else const int block = 1024; #endif // PADDLE_WITH_HIP int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = std::min(C, max_blocks); if (layout == framework::DataLayout::kNCHW) { if (dscale && dbias) { const T* x_d = x->data<T>(); AffineChannelScaleBiasGradientCUDAKernel< T, block, framework::DataLayout::kNCHW><<<grid2, block, 0, dev_ctx.stream()>>>( dy_d, x_d, N, C, HxW, ds_d, db_d); } if (dx) { KeAffineChannelCUDA<T, framework::DataLayout::kNCHW, false><<<grid1, block, 0, dev_ctx.stream()>>>( dy_d, s_d, nullptr, C, HxW, num, dx_d); } } else { if (dscale && dbias) { const T* x_d = x->data<T>(); AffineChannelScaleBiasGradientCUDAKernel< T, block, framework::DataLayout::kNHWC><<<grid2, block, 0, dev_ctx.stream()>>>( dy_d, x_d, N, C, HxW, ds_d, db_d); } if (dx) { KeAffineChannelCUDA<T, framework::DataLayout::kNHWC, false><<<grid1, block, 0, dev_ctx.stream()>>>( dy_d, s_d, nullptr, C, HxW, num, dx_d); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(affine_channel, ops::AffineChannelCUDAKernel<CUDA, float>, ops::AffineChannelCUDAKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL(affine_channel_grad, ops::AffineChannelGradCUDAKernel<CUDA, float>, ops::AffineChannelGradCUDAKernel<CUDA, double>);
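At its core the affine_channel operator above is a per-channel scale-and-shift: for NCHW data the channel of flat index i is (i / HxW) % C, and for NHWC it is i % C. The following standalone sketch reproduces just that forward computation with a grid-stride loop; the kernel name, the tiny unified-memory harness, and the chosen sizes are illustrative assumptions, while gradients, layout dispatch, and registration are handled only in the files above.

#include <cstdio>

// y[i] = scale[c] * x[i] + bias[c], with c derived from the flat NCHW index.
__global__ void affine_channel_nchw(const float *x, const float *scale, const float *bias,
                                    int C, int HxW, int num, float *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (; i < num; i += stride) {            // grid-stride loop, as in the kernel above
        int c = (i / HxW) % C;
        y[i] = scale[c] * x[i] + bias[c];
    }
}

int main() {
    const int N = 2, C = 3, HxW = 4, num = N * C * HxW;
    float *x, *y, *scale, *bias;
    cudaMallocManaged(&x, num * sizeof(float));
    cudaMallocManaged(&y, num * sizeof(float));
    cudaMallocManaged(&scale, C * sizeof(float));
    cudaMallocManaged(&bias, C * sizeof(float));
    for (int i = 0; i < num; ++i) x[i] = 1.0f;
    for (int c = 0; c < C; ++c) { scale[c] = c + 1.0f; bias[c] = 0.5f; }
    affine_channel_nchw<<<4, 64>>>(x, scale, bias, C, HxW, num, y);
    cudaDeviceSynchronize();
    // channels 0,1,2 of the first image: expect 1.5, 2.5, 3.5
    printf("y[0]=%g y[HxW]=%g y[2*HxW]=%g\n", y[0], y[HxW], y[2 * HxW]);
    cudaFree(x); cudaFree(y); cudaFree(scale); cudaFree(bias);
    return 0;
}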
18a6c768c993b3279749828abad4290dd2f67f60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gtest/gtest.h> #include <algorithm> #include <vector> #include <arbor/gpu/reduce_by_key.hpp> #include "gpu_vector.hpp" using namespace arb; template <typename T, typename I> __global__ void reduce_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; hipLaunchKernelGGL(( reduce_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, no_repetitions) { int n = 64; std::vector<int> index(n); for (int i=0; i<n; ++i) index[i] = i; { std::vector<float> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0f); } { std::vector<double> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0); } } TEST(reduce_by_key, single_repeated_index) { // Perform reduction of a sequence of 1s of length n // The expected result is n for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n, 1); std::vector<int> index(n, 0); auto out = reduce(in, 1, index, 32); EXPECT_EQ(double(n), out[0]); } // Perform reduction of an ascending sequence of {1,2,3,...,n} // The expected result is n*(n+1)/2 for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n); for (int i=0; i<n; ++i) in[i] = i+1; std::vector<int> index(n, 0); auto out = reduce(in, 1, index); EXPECT_EQ(out[0], double((n+1)*n/2)); } } TEST(reduce_by_key, scatter) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {3., 1., 4., 2., 0., 0., 0., 5., 0., 0., 0., 1.}; EXPECT_EQ(n, expected.size()); auto out = reduce(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce(in, n, index, 7); EXPECT_EQ(expected, out); } // Test kernels that perform more than one reduction in a single invokation. // Used to reproduce and test for synchronization issues on V100 GPUs. 
template <typename T, typename I> __global__ void reduce_twice_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce_twice(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; hipLaunchKernelGGL(( reduce_twice_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, scatter_twice) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,3,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {6., 2., 4., 2., 0., 0., 0., 6., 0., 0., 0., 2.}; EXPECT_EQ(n, expected.size()); auto out = reduce_twice(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce_twice(in, n, index, 7); EXPECT_EQ(expected, out); }
18a6c768c993b3279749828abad4290dd2f67f60.cu
#include <gtest/gtest.h> #include <algorithm> #include <vector> #include <arbor/gpu/reduce_by_key.hpp> #include "gpu_vector.hpp" using namespace arb; template <typename T, typename I> __global__ void reduce_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; reduce_kernel<<<grid_dim, block_dim>>>(src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, no_repetitions) { int n = 64; std::vector<int> index(n); for (int i=0; i<n; ++i) index[i] = i; { std::vector<float> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0f); } { std::vector<double> in(n, 1); auto out = reduce(in, n, index); for (auto o: out) EXPECT_EQ(o, 1.0); } } TEST(reduce_by_key, single_repeated_index) { // Perform reduction of a sequence of 1s of length n // The expected result is n for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n, 1); std::vector<int> index(n, 0); auto out = reduce(in, 1, index, 32); EXPECT_EQ(double(n), out[0]); } // Perform reduction of an ascending sequence of {1,2,3,...,n} // The expected result is n*(n+1)/2 for (auto n: {1, 2, 7, 31, 32, 33, 63, 64, 65, 128}) { std::vector<double> in(n); for (int i=0; i<n; ++i) in[i] = i+1; std::vector<int> index(n, 0); auto out = reduce(in, 1, index); EXPECT_EQ(out[0], double((n+1)*n/2)); } } TEST(reduce_by_key, scatter) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {3., 1., 4., 2., 0., 0., 0., 5., 0., 0., 0., 1.}; EXPECT_EQ(n, expected.size()); auto out = reduce(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce(in, n, index, 7); EXPECT_EQ(expected, out); } // Test kernels that perform more than one reduction in a single invokation. // Used to reproduce and test for synchronization issues on V100 GPUs. 
template <typename T, typename I> __global__ void reduce_twice_kernel(const T* src, T* dst, const I* index, int n) { unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; unsigned mask = gpu::ballot(0xffffffff, tid<n); if (tid<n) { gpu::reduce_by_key(src[tid], dst, index[tid], mask); gpu::reduce_by_key(src[tid], dst, index[tid], mask); } } template <typename T> std::vector<T> reduce_twice(const std::vector<T>& in, size_t n_out, const std::vector<int>& index, unsigned block_dim=128) { EXPECT_EQ(in.size(), index.size()); EXPECT_TRUE(std::is_sorted(index.begin(), index.end())); using array = gpu_vector<T>; using iarray = gpu_vector<int>; int n = in.size(); array src(in); iarray idx(index); array dst(std::vector<T>(n_out, 0)); unsigned grid_dim = (n-1)/block_dim + 1; reduce_twice_kernel<<<grid_dim, block_dim>>>(src.data(), dst.data(), idx.data(), n); return dst.host_vector(); } TEST(reduce_by_key, scatter_twice) { // A monotonic sequence of keys with repetitions and gaps, for a reduction // onto an array of length 12. std::size_t n = 12; std::vector<int> index = {0,0,0,1,2,2,3,7,7,7,11}; std::vector<double> in(index.size(), 1); std::vector<double> expected = {6., 2., 4., 2., 0., 0., 0., 6., 0., 0., 0., 2.}; EXPECT_EQ(n, expected.size()); auto out = reduce_twice(in, n, index); EXPECT_EQ(expected, out); // rerun with 7 threads per thread block, to test // * using more than one thread block // * thread blocks that are not a multiple of 32 // * thread blocks that are less than 32 out = reduce_twice(in, n, index, 7); EXPECT_EQ(expected, out); }
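The tests above exercise arb::gpu::reduce_by_key, a warp-level primitive that combines contributions whose sorted keys collide before writing to global memory. A naive baseline with the same semantics is simply one atomicAdd per element; the sketch below is that slow reference (illustrative names, not Arbor's implementation), using the same scatter pattern as the "scatter" test so the expected output can be checked by eye.

#include <cstdio>

// Reference semantics of reduce_by_key: dst[index[i]] += src[i] for every i.
// One atomicAdd per element; correct for any key vector, just slower under contention.
__global__ void reduce_by_key_naive(const float *src, float *dst, const int *index, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) atomicAdd(&dst[index[tid]], src[tid]);
}

int main() {
    // Same key pattern as the "scatter" test above.
    const int keys[] = {0,0,0,1,2,2,2,2,3,3,7,7,7,7,7,11};
    const int n = sizeof(keys) / sizeof(keys[0]), n_out = 12;
    float *src, *dst; int *idx;
    cudaMallocManaged(&src, n * sizeof(float));
    cudaMallocManaged(&dst, n_out * sizeof(float));
    cudaMallocManaged(&idx, n * sizeof(int));
    for (int i = 0; i < n; ++i) { src[i] = 1.0f; idx[i] = keys[i]; }
    for (int i = 0; i < n_out; ++i) dst[i] = 0.0f;
    reduce_by_key_naive<<<1, 32>>>(src, dst, idx, n);
    cudaDeviceSynchronize();
    for (int i = 0; i < n_out; ++i) printf("%g ", dst[i]);   // 3 1 4 2 0 0 0 5 0 0 0 1
    printf("\n");
    cudaFree(src); cudaFree(dst); cudaFree(idx);
    return 0;
}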
79b1b629218f63e9a01eeae73834ddc06a58a4de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../gtest.h" #include <backends/gpu/intrinsics.hpp> #include <backends/gpu/managed_ptr.hpp> namespace kernels { template <typename T> __global__ void test_atomic_add(T* x) { cuda_atomic_add(x, threadIdx.x+1); } template <typename T> __global__ void test_atomic_sub(T* x) { cuda_atomic_sub(x, threadIdx.x+1); } } // test atomic addition wrapper for single and double precision TEST(gpu_intrinsics, cuda_atomic_add) { int expected = (128*129)/2; auto f = arb::gpu::make_managed_ptr<float>(0.f); hipLaunchKernelGGL(( kernels::test_atomic_add), dim3(1), dim3(128), 0, 0, f.get()); hipDeviceSynchronize(); EXPECT_EQ(float(expected), *f); auto d = arb::gpu::make_managed_ptr<double>(0.); hipLaunchKernelGGL(( kernels::test_atomic_add), dim3(1), dim3(128), 0, 0, d.get()); hipDeviceSynchronize(); EXPECT_EQ(double(expected), *d); } // test atomic subtraction wrapper for single and double precision TEST(gpu_intrinsics, cuda_atomic_sub) { int expected = -(128*129)/2; auto f = arb::gpu::make_managed_ptr<float>(0.f); hipLaunchKernelGGL(( kernels::test_atomic_sub), dim3(1), dim3(128), 0, 0, f.get()); hipDeviceSynchronize(); EXPECT_EQ(float(expected), *f); auto d = arb::gpu::make_managed_ptr<double>(0.); hipLaunchKernelGGL(( kernels::test_atomic_sub), dim3(1), dim3(128), 0, 0, d.get()); hipDeviceSynchronize(); EXPECT_EQ(double(expected), *d); }
79b1b629218f63e9a01eeae73834ddc06a58a4de.cu
#include "../gtest.h" #include <backends/gpu/intrinsics.hpp> #include <backends/gpu/managed_ptr.hpp> namespace kernels { template <typename T> __global__ void test_atomic_add(T* x) { cuda_atomic_add(x, threadIdx.x+1); } template <typename T> __global__ void test_atomic_sub(T* x) { cuda_atomic_sub(x, threadIdx.x+1); } } // test atomic addition wrapper for single and double precision TEST(gpu_intrinsics, cuda_atomic_add) { int expected = (128*129)/2; auto f = arb::gpu::make_managed_ptr<float>(0.f); kernels::test_atomic_add<<<1, 128>>>(f.get()); cudaDeviceSynchronize(); EXPECT_EQ(float(expected), *f); auto d = arb::gpu::make_managed_ptr<double>(0.); kernels::test_atomic_add<<<1, 128>>>(d.get()); cudaDeviceSynchronize(); EXPECT_EQ(double(expected), *d); } // test atomic subtraction wrapper for single and double precision TEST(gpu_intrinsics, cuda_atomic_sub) { int expected = -(128*129)/2; auto f = arb::gpu::make_managed_ptr<float>(0.f); kernels::test_atomic_sub<<<1, 128>>>(f.get()); cudaDeviceSynchronize(); EXPECT_EQ(float(expected), *f); auto d = arb::gpu::make_managed_ptr<double>(0.); kernels::test_atomic_sub<<<1, 128>>>(d.get()); cudaDeviceSynchronize(); EXPECT_EQ(double(expected), *d); }
db9ee70d571266d56229b05046680e13e5c1568f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> #include <ATen/native/hip/LaunchUtils.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; const scalar_t* btm_data = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = btm_data[h * width + w]; if ((static_cast<accscalar_t>(val) > maxval) || at::_isnan(val)) { maxidx = h * width + w; maxval = static_cast<accscalar_t>(val); } } } top_data[index] = static_cast<accscalar_t>(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]); // flattening cta for pre-computation & 
smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[i] = 0; } __syncthreads(); int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; top_data = top_data + batch_id * pooled_height * pooled_width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; bottom_data = bottom_data + batch_id * in_stride_n; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int hstart = oh * stride_h - pad_h; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int wstart = ow * stride_w - pad_w; int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { scalar_t val = ptr_input[c*in_stride_c]; if ((static_cast<accscalar_t>(val) > out_cached[cached_index]) || at::_isnan(val)) { out_cached[cached_index] = static_cast<accscalar_t>(val); out_mask_cached[cached_index] = ih * width + iw; } cached_index += blockDim.x; } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; int cached_index = threadIdx.x; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { ptr_output_data[c] = out_cached[cached_index]; ptr_output_mask[c] = out_mask_cached[cached_index]; out_cached[cached_index] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[cached_index] = 0; cached_index += blockDim.x; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (USE_ROCM) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index / width; int w = index - h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int pwstart = p_start(w, pad_w, 
kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) { for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw + offset] == h * width + w) { gradient += static_cast<accscalar_t>(top_diff[ph * pooled_width + pw + offset]); } } } bottom_diff[(n*channels+c)*height*width+index] = static_cast<scalar_t>(gradient); } } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* bottom_diff) { extern __shared__ int smem[]; accscalar_t *out_cached = reinterpret_cast<accscalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = accscalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; bottom_diff = bottom_diff + batch_id * height * width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; top_diff = top_diff + batch_id * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); int index_shift = ih * width + iw; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { int cached_index = threadIdx.x; const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { out_cached[cached_index] += static_cast<accscalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { ptr_bottom_diff[c] = 
static_cast<scalar_t>(out_cached[cached_index]); out_cached[cached_index] = accscalar_t(0.0); cached_index += blockDim.x; } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { ptr_bottom_diff[c] = static_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } } } } // namespace TORCH_IMPL_FUNC(max_pool2d_with_indices_out_cuda) (const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& output, const Tensor& indices) { NoNamesGuard guard; TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); if (output.numel() == 0) { return; } const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputHeight = output.size(-2); const int64_t outputWidth = output.size(-1); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_n = input_.ndimension() == 4 ? 
input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], ceil_div(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t)); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); hipLaunchKernelGGL(( max_pool_forward_nhwc<scalar_t, scalar_t>) , dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, output_data, indices_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BLOCK_THREADS); hipLaunchKernelGGL(( max_pool_forward_nchw<scalar_t, scalar_t>) , dim3(ceil_div(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); } TORCH_IMPL_FUNC(max_pool2d_with_indices_backward_out_cuda) (const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, const Tensor& gradInput) { NoNamesGuard guard; TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); if (gradOutput_.numel() == 0) { return; } const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_n = input.ndimension() == 4 ? input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t outputHeight = gradOutput.size(-2); const int64_t outputWidth = gradOutput.size(-1); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.zero_(); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( 
at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], ceil_div(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); // The backward kernel is launched on input instead output. // If it is launched on output layer, atomic_add would not provide much benefit on FP16. // Please check comments at https://github.com/pytorch/pytorch/pull/34519. hipLaunchKernelGGL(( max_pool_backward_nhwc<scalar_t, accscalar_t>) , dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, gradInput_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; hipLaunchKernelGGL(( max_pool_backward_nchw<scalar_t, accscalar_t>) , dim3(grid), dim3(BLOCK_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); } } // at::native } // at
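Most of the complexity in the pooling kernels above is index bookkeeping: each output pixel (ph, pw) takes the max over an input window starting at ph*stride - pad with dilated steps, and the backward pass inverts that mapping through p_start/p_end. The host-side reference below sketches the forward NCHW computation for a single plane so the window arithmetic can be read in isolation; names and sizes are illustrative, it compiles as plain host code under nvcc, and it handles none of the NHWC, ceil_mode, or dispatch logic of the real kernels.

#include <algorithm>
#include <cfloat>
#include <cstdio>
#include <vector>

// Host reference for one (n, c) plane of max_pool2d forward, NCHW: produces the
// pooled values and the flat argmax indices the backward pass scatters into.
void max_pool2d_plane(const std::vector<float> &in, int H, int W,
                      int kH, int kW, int sH, int sW, int padH, int padW,
                      int dH, int dW, int pooledH, int pooledW,
                      std::vector<float> &out, std::vector<long> &ind) {
    for (int ph = 0; ph < pooledH; ++ph) {
        for (int pw = 0; pw < pooledW; ++pw) {
            int hstart = ph * sH - padH, wstart = pw * sW - padW;
            int hend = std::min(hstart + (kH - 1) * dH + 1, H);
            int wend = std::min(wstart + (kW - 1) * dW + 1, W);
            while (hstart < 0) hstart += dH;      // clip the window into the image
            while (wstart < 0) wstart += dW;
            float best = -FLT_MAX;
            long best_idx = hstart * W + wstart;
            for (int h = hstart; h < hend; h += dH)
                for (int w = wstart; w < wend; w += dW)
                    if (in[h * W + w] > best) { best = in[h * W + w]; best_idx = h * W + w; }
            out[ph * pooledW + pw] = best;
            ind[ph * pooledW + pw] = best_idx;
        }
    }
}

int main() {
    const int H = 4, W = 4, pooledH = 2, pooledW = 2;
    std::vector<float> in(H * W);
    for (int i = 0; i < H * W; ++i) in[i] = float(i);   // 0..15 row-major
    std::vector<float> out(pooledH * pooledW);
    std::vector<long> ind(pooledH * pooledW);
    // 2x2 kernel, stride 2, no padding, dilation 1: expect maxima 5, 7, 13, 15
    max_pool2d_plane(in, H, W, 2, 2, 2, 2, 0, 0, 1, 1, pooledH, pooledW, out, ind);
    for (int i = 0; i < pooledH * pooledW; ++i)
        printf("out[%d]=%g ind[%d]=%ld\n", i, out[i], i, ind[i]);
    return 0;
}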
db9ee70d571266d56229b05046680e13e5c1568f.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> #include <ATen/native/cuda/LaunchUtils.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; const scalar_t* btm_data = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = btm_data[h * width + w]; if ((static_cast<accscalar_t>(val) > maxval) || at::_isnan(val)) { maxidx = h * width + w; maxval = static_cast<accscalar_t>(val); } } } top_data[index] = static_cast<accscalar_t>(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + 
blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[i] = 0; } __syncthreads(); int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; top_data = top_data + batch_id * pooled_height * pooled_width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; bottom_data = bottom_data + batch_id * in_stride_n; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int hstart = oh * stride_h - pad_h; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int wstart = ow * stride_w - pad_w; int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { scalar_t val = ptr_input[c*in_stride_c]; if ((static_cast<accscalar_t>(val) > out_cached[cached_index]) || at::_isnan(val)) { out_cached[cached_index] = static_cast<accscalar_t>(val); out_mask_cached[cached_index] = ih * width + iw; } cached_index += blockDim.x; } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; int cached_index = threadIdx.x; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { ptr_output_data[c] = out_cached[cached_index]; ptr_output_mask[c] = out_mask_cached[cached_index]; out_cached[cached_index] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[cached_index] = 0; cached_index += blockDim.x; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (USE_ROCM) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index / width; int w = index - h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, 
stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) { for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw + offset] == h * width + w) { gradient += static_cast<accscalar_t>(top_diff[ph * pooled_width + pw + offset]); } } } bottom_diff[(n*channels+c)*height*width+index] = static_cast<scalar_t>(gradient); } } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* bottom_diff) { extern __shared__ int smem[]; accscalar_t *out_cached = reinterpret_cast<accscalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = accscalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; bottom_diff = bottom_diff + batch_id * height * width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; top_diff = top_diff + batch_id * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); int index_shift = ih * width + iw; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { int cached_index = threadIdx.x; const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { out_cached[cached_index] += static_cast<accscalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { ptr_bottom_diff[c] = static_cast<scalar_t>(out_cached[cached_index]); out_cached[cached_index] = 
accscalar_t(0.0); cached_index += blockDim.x; } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { ptr_bottom_diff[c] = static_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } } } } // namespace TORCH_IMPL_FUNC(max_pool2d_with_indices_out_cuda) (const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& output, const Tensor& indices) { NoNamesGuard guard; TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); if (output.numel() == 0) { return; } const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputHeight = output.size(-2); const int64_t outputWidth = output.size(-1); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_n = input_.ndimension() == 4 ? 
input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], ceil_div(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t)); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); max_pool_forward_nhwc<scalar_t, scalar_t> <<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>>( input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, output_data, indices_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BLOCK_THREADS); max_pool_forward_nchw<scalar_t, scalar_t> <<<ceil_div(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); } TORCH_IMPL_FUNC(max_pool2d_with_indices_backward_out_cuda) (const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, const Tensor& gradInput) { NoNamesGuard guard; TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); if (gradOutput_.numel() == 0) { return; } const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_n = input.ndimension() == 4 ? input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t outputHeight = gradOutput.size(-2); const int64_t outputWidth = gradOutput.size(-1); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.zero_(); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = ceil_div( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( 
at::cuda::getCurrentDeviceProperties()->maxGridSize[1], ceil_div(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], ceil_div(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); // The backward kernel is launched on input instead output. // If it is launched on output layer, atomic_add would not provide much benefit on FP16. // Please check comments at https://github.com/pytorch/pytorch/pull/34519. max_pool_backward_nhwc<scalar_t, accscalar_t> <<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, gradInput_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; max_pool_backward_nchw<scalar_t, accscalar_t> <<<grid, BLOCK_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); } } // at::native } // at
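// A minimal standalone sketch (not part of the ATen file above) of the
// output->input window arithmetic that max_pool_forward_nhwc uses: for output
// row `oh`, the pooled input rows are
//   hstart = oh*stride_h - pad_h,
//   hend   = min(hstart + (kernel_h - 1)*dilation_h + 1, height),
// with hstart then stepped up by dilation_h while negative. Names below are
// illustrative only.
#include <algorithm>
#include <cstdio>

static void pool_window(int o, int stride, int pad, int kernel, int dilation,
                        int input_size, int *start, int *end) {
  int s = o * stride - pad;
  int e = std::min(s + (kernel - 1) * dilation + 1, input_size);
  while (s < 0) s += dilation;          // same clamp loop as the kernel
  *start = s;
  *end = e;
}

int main() {
  int s, e;
  // Example: kernel_h=3, stride_h=2, pad_h=1, dilation_h=1, height=7, oh=0
  pool_window(0, 2, 1, 3, 1, 7, &s, &e);
  printf("pooled input rows: [%d, %d)\n", s, e);  // [0, 2): row -1 is clamped away
  return 0;
}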
737d0aa19125ce539cd5bb31cc5f00158c208d4e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <ctype.h> #include <sys/types.h> #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> #include <omp.h> #include "tbb/tick_count.h" using tbb::tick_count; char* map_file(char *filename, int *length_out) { struct stat file_stat; int fd = open(filename, O_RDONLY); if (fd == -1) { printf("failed to open file: %s\n", filename); exit(1); } if (fstat(fd, &file_stat) != 0) { printf("failed to stat file: %s\n", filename); exit(1); } off_t length = file_stat.st_size; void *file = mmap(0, length, PROT_WRITE, MAP_PRIVATE, fd, 0); if (file == (void *)-1) { printf("failed to stat file: %s\n", filename); exit(1); } *length_out = length; return (char *)file; } __global__ void makeUpper(char * file, int length, int total) { int index = blockIdx.x * blockDim.x + threadIdx.x; int startIndex = index * length / total; int endIndex = (index+1) * length / total; for (int i = startIndex; i < endIndex; ++i) { file[i] = (file[i] >= 'a' && file[i] <= 'z') ? (file[i] - 'a' + 'A') : file[i]; } } int main(int argc, char *argv[]) { int length = 0; char *file = map_file(argv[1], &length); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); char * cudaFile = NULL; hipMalloc((void**)&cudaFile, length); hipMemcpy(cudaFile, file, length, hipMemcpyHostToDevice); int numblocks = 4096; int numthreads = 512; hipEventRecord(start); hipLaunchKernelGGL(( makeUpper), dim3(numblocks), dim3(numthreads), 0, 0, cudaFile, length, numblocks * numthreads); hipEventRecord(stop); hipMemcpy(file, cudaFile, length, hipMemcpyDeviceToHost); hipFree(cudaFile); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("time = %f milliseconds\n", milliseconds); }
737d0aa19125ce539cd5bb31cc5f00158c208d4e.cu
#include <string.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <ctype.h> #include <sys/types.h> #include <sys/mman.h> #include <sys/stat.h> #include <fcntl.h> #include <omp.h> #include "tbb/tick_count.h" using tbb::tick_count; char* map_file(char *filename, int *length_out) { struct stat file_stat; int fd = open(filename, O_RDONLY); if (fd == -1) { printf("failed to open file: %s\n", filename); exit(1); } if (fstat(fd, &file_stat) != 0) { printf("failed to stat file: %s\n", filename); exit(1); } off_t length = file_stat.st_size; void *file = mmap(0, length, PROT_WRITE, MAP_PRIVATE, fd, 0); if (file == (void *)-1) { printf("failed to stat file: %s\n", filename); exit(1); } *length_out = length; return (char *)file; } __global__ void makeUpper(char * file, int length, int total) { int index = blockIdx.x * blockDim.x + threadIdx.x; int startIndex = index * length / total; int endIndex = (index+1) * length / total; for (int i = startIndex; i < endIndex; ++i) { file[i] = (file[i] >= 'a' && file[i] <= 'z') ? (file[i] - 'a' + 'A') : file[i]; } } int main(int argc, char *argv[]) { int length = 0; char *file = map_file(argv[1], &length); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); char * cudaFile = NULL; cudaMalloc((void**)&cudaFile, length); cudaMemcpy(cudaFile, file, length, cudaMemcpyHostToDevice); int numblocks = 4096; int numthreads = 512; cudaEventRecord(start); makeUpper<<<numblocks, numthreads>>>(cudaFile, length, numblocks * numthreads); cudaEventRecord(stop); cudaMemcpy(file, cudaFile, length, cudaMemcpyDeviceToHost); cudaFree(cudaFile); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("time = %f milliseconds\n", milliseconds); }
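// A minimal alternative sketch (not from the files above): makeUpper computes
// `index * length / total` in int arithmetic, which overflows once
// index*length exceeds INT_MAX (i.e. for files beyond roughly 1 KB at the
// highest thread indices of a 4096x512 launch). A grid-stride loop over
// size_t sidesteps that and works for any grid/block shape:
__global__ void makeUpperGridStride(char *file, size_t length) {
  size_t stride = (size_t)blockDim.x * gridDim.x;
  for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < length;
       i += stride) {
    char c = file[i];
    file[i] = (c >= 'a' && c <= 'z') ? (c - 'a' + 'A') : c;
  }
}
// Usage with the same launch shape as above (hypothetical call site):
//   makeUpperGridStride<<<4096, 512>>>(cudaFile, (size_t)length);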
09814c4cca0683ab36c9987c39ca3ecdd90fda28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int main(void) { int N = 20 * (1 << 20); float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); hipMalloc(&d_x, N*sizeof(float)); hipMalloc(&d_y, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice); hipEventRecord(start); // Perform SAXPY on 1M elements hipLaunchKernelGGL(( saxpy), dim3((N+511)/512), dim3(512), 0, 0, N, 2.0f, d_x, d_y); hipEventRecord(stop); hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = max(maxError, abs(y[i]-4.0f)); } printf("Max error: %f\n", maxError); printf("Effective Bandwidth (GB/s): %f\n", N*4*3/milliseconds/1e6); }
09814c4cca0683ab36c9987c39ca3ecdd90fda28.cu
#include <iostream> #include <math.h> __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int main(void) { int N = 20 * (1 << 20); float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); cudaMalloc(&d_x, N*sizeof(float)); cudaMalloc(&d_y, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(start); // Perform SAXPY on 1M elements saxpy<<<(N+511)/512, 512>>>(N, 2.0f, d_x, d_y); cudaEventRecord(stop); cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = max(maxError, abs(y[i]-4.0f)); } printf("Max error: %f\n", maxError); printf("Effective Bandwidth (GB/s): %f\n", N*4*3/milliseconds/1e6); }
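// A short sketch (not from the files above) of where the bandwidth figure
// comes from: each SAXPY element moves three 4-byte floats (read x[i],
// read y[i], write y[i]), so bytes = N*4*3; dividing by the elapsed time in
// milliseconds and by 1e6 yields GB/s, because 1 GB/s == 1e6 bytes per ms.
#include <cstddef>
static inline float effective_bandwidth_gbps(size_t bytes, float milliseconds) {
  return bytes / milliseconds / 1e6f;   // bytes per ms -> GB/s
}
// For N = 20*(1<<20): bytes = N*12 ~= 2.52e8, matching the printf above.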
1ffe7fbbe23f443298a1df002fbb0bf594821d8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } const int TILE_DIM = 32; const int NUM_REPS = 100; const int SIDE = 8; // Check errors and print GB/s void postprocess(const float *ref, const float *res, int n, float ms) { bool passed = true; for (int i = 0; i < n; i++) if (res[i] != ref[i]) { printf("%d %f %f\n", i, res[i], ref[i]); printf("%25s\n", "*** FAILED ***"); passed = false; break; } if (passed) printf("%20.2f%20.2f\n", ms/NUM_REPS, 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms); } __global__ void matrixTranspose(float *_a, float *_b, const int cols, const int rows) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row int j = blockIdx.x * blockDim.x + threadIdx.x; // col int index_in = i*cols + j; // (i,j) from matrix A int index_out = j*rows + i; // transposed index _b[index_out] = _a[index_in]; } __global__ void matrixTransposeShared(const float *_a, float *_b, const int cols, const int rows) { __shared__ float mat[TILE_DIM][TILE_DIM]; int bx = blockIdx.x *blockDim.x; int by = blockIdx.y *blockDim.y; int i = by + threadIdx.y; int j = bx + threadIdx.x; //input int ti = bx + threadIdx.y; int tj = by + threadIdx.x; //output if (i < rows && j < cols)//i < ny && j < nx mat[threadIdx.x][threadIdx.y] = _a[i * cols + j]; __syncthreads(); //Wait for all data to be copied if (tj < cols && ti < rows) _b[ti * rows + tj] = mat[threadIdx.y][threadIdx.x]; } __global__ void matrixTransposeSharedwBC(const float *_a, float *_b, const int cols, const int rows) { __shared__ float mat[TILE_DIM][TILE_DIM + 1]; int bx = blockIdx.x *blockDim.x; int by = blockIdx.y *blockDim.y; int i = by + threadIdx.y; int j = bx + threadIdx.x; //input int ti = bx + threadIdx.y; int tj = by + threadIdx.x; //output if (i < rows && j < cols)//i < rows && j < cols mat[threadIdx.x][threadIdx.y] = _a[i * cols + j]; __syncthreads(); //Wait for all data to be copied if (tj < cols && ti < rows) _b[ti * rows + tj] = mat[threadIdx.y][threadIdx.x]; } __global__ void matrixTransposeUnrolled(const float *_a, float *_b, const int cols, const int rows) { __shared__ float mat[TILE_DIM][TILE_DIM + 1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; #pragma unroll for (int k = 0; k < TILE_DIM; k += SIDE) { if (x < rows && y + k < cols) mat[threadIdx.y + k][threadIdx.x] = _a[((y + k) * rows) + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; #pragma unroll for (int k = 0; k < TILE_DIM; k += SIDE) { if (x < cols && y + k < rows) _b[(y + k) * cols + x] = mat[threadIdx.x][threadIdx.y + k]; } } int main(int argc, char **argv){ const int nx = 1024; const int ny = 1024; const int mem_size = nx*ny*sizeof(float); dim3 gridDim(nx / TILE_DIM, ny / TILE_DIM, 1); dim3 blockDim(TILE_DIM, TILE_DIM, 1); int devId = 0; if (argc > 1) devId = atoi(argv[1]); hipDeviceProp_t prop; checkCuda(hipGetDeviceProperties(&prop, devId)); printf("\nDevice : %s\n", prop.name); printf("Matrix size: %d %d, Block size: %d %d\n", nx, ny, TILE_DIM, TILE_DIM); printf("gridDim: %d %d %d. 
blockDim: %d %d %d\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z); checkCuda(hipSetDevice(devId)); float *h_idata = (float*)malloc(mem_size); float *h_tdata = (float*)malloc(mem_size); float *gold = (float*)malloc(mem_size); float *d_idata, *d_cdata, *d_tdata; checkCuda(hipMalloc(&d_idata, mem_size)); checkCuda(hipMalloc(&d_tdata, mem_size)); // host for (int j = 0; j < ny; j++) for (int i = 0; i < nx; i++) h_idata[j*nx + i] = j*nx + i; // correct result for error checking for (int j = 0; j < ny; j++) for (int i = 0; i < nx; i++) gold[j*nx + i] = h_idata[i*nx + j]; // device checkCuda(hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice)); // events for timing hipEvent_t startEvent, stopEvent; checkCuda(hipEventCreate(&startEvent)); checkCuda(hipEventCreate(&stopEvent)); float ms; // ------------ // time kernels // ------------ printf("%25s%20s%25s\n", "Method","Time(ms)", "Bandwidth (GB/s)"); // ---- // matrixTranspose // ---- printf("%25s", "matrixTranspose"); checkCuda(hipMemset(d_tdata, 0, mem_size)); //matrixTranspose << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTranspose << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(stopEvent, 0)); checkCuda(hipEventSynchronize(stopEvent)); checkCuda(hipEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); // ---- // matrixTransposeShared // ---- printf("%25s", "matrixTransposeShared"); checkCuda(hipMemset(d_tdata, 0, mem_size)); //matrixTransposeShared << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTransposeShared << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(stopEvent, 0)); checkCuda(hipEventSynchronize(stopEvent)); checkCuda(hipEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); // ---- // matrixTransposeSharedwBC // ---- printf("%25s", "matrixTransposeSharedwBC"); checkCuda(hipMemset(d_tdata, 0, mem_size)); matrixTransposeSharedwBC << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTransposeSharedwBC << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(stopEvent, 0)); checkCuda(hipEventSynchronize(stopEvent)); checkCuda(hipEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); // ---- // matrixTransposeUnrolled // ---- dim3 blockDimUnroll(TILE_DIM, SIDE, 1);// !important printf("Matrix size: %d %d, Block size: %d %d\n", nx, ny, TILE_DIM, TILE_DIM); printf("gridDim: %d %d %d. 
blockDim: %d %d %d\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z); printf("%25s", "matrixTransposeUnrolled"); checkCuda(hipMemset(d_tdata, 0, mem_size)); //matrixTransposeUnrolled << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTransposeUnrolled << <gridDim, blockDimUnroll >> >(d_idata, d_tdata, nx, ny); checkCuda(hipEventRecord(stopEvent, 0)); checkCuda(hipEventSynchronize(stopEvent)); checkCuda(hipEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(hipMemcpy(h_tdata, d_tdata, mem_size, hipMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); error_exit: // cleanup checkCuda(hipEventDestroy(startEvent)); checkCuda(hipEventDestroy(stopEvent)); checkCuda(hipFree(d_tdata)); checkCuda(hipFree(d_idata)); free(h_idata); free(h_tdata); free(gold); }
1ffe7fbbe23f443298a1df002fbb0bf594821d8f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } const int TILE_DIM = 32; const int NUM_REPS = 100; const int SIDE = 8; // Check errors and print GB/s void postprocess(const float *ref, const float *res, int n, float ms) { bool passed = true; for (int i = 0; i < n; i++) if (res[i] != ref[i]) { printf("%d %f %f\n", i, res[i], ref[i]); printf("%25s\n", "*** FAILED ***"); passed = false; break; } if (passed) printf("%20.2f%20.2f\n", ms/NUM_REPS, 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms); } __global__ void matrixTranspose(float *_a, float *_b, const int cols, const int rows) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row int j = blockIdx.x * blockDim.x + threadIdx.x; // col int index_in = i*cols + j; // (i,j) from matrix A int index_out = j*rows + i; // transposed index _b[index_out] = _a[index_in]; } __global__ void matrixTransposeShared(const float *_a, float *_b, const int cols, const int rows) { __shared__ float mat[TILE_DIM][TILE_DIM]; int bx = blockIdx.x *blockDim.x; int by = blockIdx.y *blockDim.y; int i = by + threadIdx.y; int j = bx + threadIdx.x; //input int ti = bx + threadIdx.y; int tj = by + threadIdx.x; //output if (i < rows && j < cols)//i < ny && j < nx mat[threadIdx.x][threadIdx.y] = _a[i * cols + j]; __syncthreads(); //Wait for all data to be copied if (tj < cols && ti < rows) _b[ti * rows + tj] = mat[threadIdx.y][threadIdx.x]; } __global__ void matrixTransposeSharedwBC(const float *_a, float *_b, const int cols, const int rows) { __shared__ float mat[TILE_DIM][TILE_DIM + 1]; int bx = blockIdx.x *blockDim.x; int by = blockIdx.y *blockDim.y; int i = by + threadIdx.y; int j = bx + threadIdx.x; //input int ti = bx + threadIdx.y; int tj = by + threadIdx.x; //output if (i < rows && j < cols)//i < rows && j < cols mat[threadIdx.x][threadIdx.y] = _a[i * cols + j]; __syncthreads(); //Wait for all data to be copied if (tj < cols && ti < rows) _b[ti * rows + tj] = mat[threadIdx.y][threadIdx.x]; } __global__ void matrixTransposeUnrolled(const float *_a, float *_b, const int cols, const int rows) { __shared__ float mat[TILE_DIM][TILE_DIM + 1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; #pragma unroll for (int k = 0; k < TILE_DIM; k += SIDE) { if (x < rows && y + k < cols) mat[threadIdx.y + k][threadIdx.x] = _a[((y + k) * rows) + x]; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; y = blockIdx.x * TILE_DIM + threadIdx.y; #pragma unroll for (int k = 0; k < TILE_DIM; k += SIDE) { if (x < cols && y + k < rows) _b[(y + k) * cols + x] = mat[threadIdx.x][threadIdx.y + k]; } } int main(int argc, char **argv){ const int nx = 1024; const int ny = 1024; const int mem_size = nx*ny*sizeof(float); dim3 gridDim(nx / TILE_DIM, ny / TILE_DIM, 1); dim3 blockDim(TILE_DIM, TILE_DIM, 1); int devId = 0; if (argc > 1) devId = atoi(argv[1]); cudaDeviceProp prop; checkCuda(cudaGetDeviceProperties(&prop, devId)); printf("\nDevice : %s\n", prop.name); printf("Matrix size: %d %d, Block size: %d %d\n", nx, ny, TILE_DIM, TILE_DIM); printf("gridDim: %d %d %d. 
blockDim: %d %d %d\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z); checkCuda(cudaSetDevice(devId)); float *h_idata = (float*)malloc(mem_size); float *h_tdata = (float*)malloc(mem_size); float *gold = (float*)malloc(mem_size); float *d_idata, *d_cdata, *d_tdata; checkCuda(cudaMalloc(&d_idata, mem_size)); checkCuda(cudaMalloc(&d_tdata, mem_size)); // host for (int j = 0; j < ny; j++) for (int i = 0; i < nx; i++) h_idata[j*nx + i] = j*nx + i; // correct result for error checking for (int j = 0; j < ny; j++) for (int i = 0; i < nx; i++) gold[j*nx + i] = h_idata[i*nx + j]; // device checkCuda(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice)); // events for timing cudaEvent_t startEvent, stopEvent; checkCuda(cudaEventCreate(&startEvent)); checkCuda(cudaEventCreate(&stopEvent)); float ms; // ------------ // time kernels // ------------ printf("%25s%20s%25s\n", "Method","Time(ms)", "Bandwidth (GB/s)"); // ---- // matrixTranspose // ---- printf("%25s", "matrixTranspose"); checkCuda(cudaMemset(d_tdata, 0, mem_size)); //matrixTranspose << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTranspose << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(stopEvent, 0)); checkCuda(cudaEventSynchronize(stopEvent)); checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); // ---- // matrixTransposeShared // ---- printf("%25s", "matrixTransposeShared"); checkCuda(cudaMemset(d_tdata, 0, mem_size)); //matrixTransposeShared << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTransposeShared << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(stopEvent, 0)); checkCuda(cudaEventSynchronize(stopEvent)); checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); // ---- // matrixTransposeSharedwBC // ---- printf("%25s", "matrixTransposeSharedwBC"); checkCuda(cudaMemset(d_tdata, 0, mem_size)); matrixTransposeSharedwBC << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTransposeSharedwBC << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(stopEvent, 0)); checkCuda(cudaEventSynchronize(stopEvent)); checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); // ---- // matrixTransposeUnrolled // ---- dim3 blockDimUnroll(TILE_DIM, SIDE, 1);// !important printf("Matrix size: %d %d, Block size: %d %d\n", nx, ny, TILE_DIM, TILE_DIM); printf("gridDim: %d %d %d. 
blockDim: %d %d %d\n", gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z); printf("%25s", "matrixTransposeUnrolled"); checkCuda(cudaMemset(d_tdata, 0, mem_size)); //matrixTransposeUnrolled << <gridDim, blockDim >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(startEvent, 0)); for (int i = 0; i < NUM_REPS; i++) matrixTransposeUnrolled << <gridDim, blockDimUnroll >> >(d_idata, d_tdata, nx, ny); checkCuda(cudaEventRecord(stopEvent, 0)); checkCuda(cudaEventSynchronize(stopEvent)); checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent)); checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost)); postprocess(gold, h_tdata, nx * ny, ms); error_exit: // cleanup checkCuda(cudaEventDestroy(startEvent)); checkCuda(cudaEventDestroy(stopEvent)); checkCuda(cudaFree(d_tdata)); checkCuda(cudaFree(d_idata)); free(h_idata); free(h_tdata); free(gold); }
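// A short sketch (not from the files above) of why matrixTransposeSharedwBC
// pads the tile to [TILE_DIM][TILE_DIM + 1]. On current NVIDIA GPUs shared
// memory is divided into 32 four-byte banks, and element (r, c) of a
// row-major float tile lands in bank (r*row_pitch + c) % 32. With
// row_pitch = 32 a whole column sits in one bank, so the column-wise store
// mat[threadIdx.x][threadIdx.y] serializes into a 32-way conflict; with
// row_pitch = 33 each row is shifted by one bank and the column spreads over
// all 32 banks.
#include <cstdio>
int main() {
  const int pitches[2] = {32, 33};
  for (int p = 0; p < 2; ++p) {
    printf("row_pitch %d, banks of column 0:", pitches[p]);
    for (int r = 0; r < 8; ++r) printf(" %d", (r * pitches[p]) % 32);
    printf("\n");  // pitch 32 -> all bank 0; pitch 33 -> banks 0 1 2 3 4 5 6 7
  }
  return 0;
}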
2a766cf1c5bd342ee91a4e4165563bffd7b8a97d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" #include "col2im.h" #include "hip/hip_runtime.h" // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void col2im_gpu_kernel(const int n, const float* data_col, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_im) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ float val = 0; int w = index % width + pad; int h = (index / width) % height + pad; int c = index / (width * height); // compute the start and end of the output int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1; int w_col_end = min(w / stride + 1, width_col); int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1; int h_col_end = min(h / stride + 1, height_col); // equivalent implementation int offset = (c * ksize * ksize + h * ksize + w) * height_col * width_col; int coeff_h_col = (1 - stride * ksize * height_col) * width_col; int coeff_w_col = (1 - stride * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; } } data_im[index] += val; } } void col2im_ongpu(float *data_col, int channels, int height, int width, int ksize, int stride, int pad, float *data_im){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height * width; hipLaunchKernelGGL(( col2im_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK), dim3(BLOCK), 0, get_cuda_stream() , num_kernels, data_col, height, width, ksize, pad, stride, height_col, width_col, data_im); CHECK_CUDA(hipPeekAtLastError()); }
2a766cf1c5bd342ee91a4e4165563bffd7b8a97d.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" #include "col2im.h" #include "cuda.h" // src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu // You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE __global__ void col2im_gpu_kernel(const int n, const float* data_col, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_im) { int index = blockIdx.x*blockDim.x+threadIdx.x; for(; index < n; index += blockDim.x*gridDim.x){ float val = 0; int w = index % width + pad; int h = (index / width) % height + pad; int c = index / (width * height); // compute the start and end of the output int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1; int w_col_end = min(w / stride + 1, width_col); int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1; int h_col_end = min(h / stride + 1, height_col); // equivalent implementation int offset = (c * ksize * ksize + h * ksize + w) * height_col * width_col; int coeff_h_col = (1 - stride * ksize * height_col) * width_col; int coeff_w_col = (1 - stride * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; } } data_im[index] += val; } } void col2im_ongpu(float *data_col, int channels, int height, int width, int ksize, int stride, int pad, float *data_im){ // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height * width; col2im_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK, BLOCK, 0, get_cuda_stream() >>>( num_kernels, data_col, height, width, ksize, pad, stride, height_col, width_col, data_im); CHECK_CUDA(cudaPeekAtLastError()); }
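// A derivation sketch (not from the files above) of the index trick in
// col2im_gpu_kernel, assuming the standard Caffe im2col layout where column
// element (c, kh, kw, h_col, w_col) lives at
//   ((c*ksize + kh)*ksize + kw)*height_col*width_col + h_col*width_col + w_col
// with kh = h - h_col*stride and kw = w - w_col*stride (h, w already carry
// +pad, as in the kernel). Expanding that expression gives exactly
//   offset + h_col*coeff_h_col + w_col*coeff_w_col
// with offset, coeff_h_col, coeff_w_col as computed above. A host-side check
// of the identity (the kernel additionally restricts h_col/w_col so that
// 0 <= kh, kw < ksize):
#include <cassert>
int main() {
  const int ksize = 3, stride = 2, height_col = 5, width_col = 7;
  const int c = 1, h = 6, w = 4;   // pad-shifted input coordinates
  const int offset      = (c*ksize*ksize + h*ksize + w) * height_col * width_col;
  const int coeff_h_col = (1 - stride*ksize*height_col) * width_col;
  const int coeff_w_col = (1 - stride*height_col*width_col);
  for (int h_col = 0; h_col <= h/stride; ++h_col)
    for (int w_col = 0; w_col <= w/stride; ++w_col) {
      const int kh = h - h_col*stride, kw = w - w_col*stride;
      const int direct = ((c*ksize + kh)*ksize + kw)*height_col*width_col
                       + h_col*width_col + w_col;
      assert(direct == offset + h_col*coeff_h_col + w_col*coeff_w_col);
    }
  return 0;
}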
8f2706a22763dfaae3a247da1bfdabefbe2bcd5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mdCuda.h" int main(int argc, char* argv[]) { if(CheckParameters(argc, argv) == false) return 0; if(OpenFiles() == false) return -1; if(Input() == false) return -1; if(OnlyCpu) SolveCpu(); else SolveCuda(100); CloseFiles(); return 0; } bool OpenFiles() { if(FileExists("mdse.out")) { cout << "mdse.out already exists. Enter 'y' to overwrite, 'n' to exit: "; string answer; cin >> answer; if(answer != "y"){ cout << "Stopping." << endl; return false; } } fileInp.open("mdse.inp"); if(fileInp.good() == false) { cout << "mdse.inp couldn't be opened for reading. Stopping." << endl; return false; } fileOut.open("mdse.out"); if(fileInp.good() == false) { cout << "mdse.out couldn't be opened for writing. Stopping." << endl; return false; } fileOut << fixed << setprecision(5); fileEne.open("mdse.ene"); if(fileEne.good() == false) { cout << "mdse.ene couldn't be opened for writing. Stopping." << endl; return false; } fileEne << fixed << setprecision(5); filePic.open("mdse.pic"); if(filePic.good() == false) { cout << "mdse.pic couldn't be opened for writing. Stopping." << endl; return false; } filePic << fixed << setprecision(5); fileBs.open("mdse.bs"); if(fileBs.good() == false) { cout << "mdse.bs couldn't be opened for writing. Stopping." << endl; return false; } fileBs << fixed << setprecision(5); return true; } bool FileExists(const string& filename) { struct stat buf; if (stat(filename.c_str(), &buf) != -1) { return true; } return false; } bool Input() { // Potential parameters for Cu /*RM = 63.546; DT = 0.9E-15; A1 = 110.766008; A2 = -46.1649783; RL1 = 2.09045946; RL2 = 1.49853083; AL1 = 0.394142248; AL2 = 0.207225507; D21 = 0.436092895; D22 = 0.245082238; */ // Potential parameters for Au RM=196.9665; DT=1.6E-15; A1=345.923364; A2=-38.9245908; RL1=1.0428923; RL2=1.05974062; AL1=0.750775965; AL2=0.229377368; D21=0.888911352; D22=0.254280292; double FACM = 0.103655772E-27; BK = 8.617385E-05; RM = RM * FACM; try { // Read the title Title = ReadLine(); // Skip the second line ReadLine(); // Read MDSL, IAVL, IPPL, ISCAL, IPD, TE, NA, LAYER, IPBC, PP(1), PP(2), PP(3) MDSL = GetValueInt(); IAVL = GetValueInt(); IPPL = GetValueInt(); ISCAL = GetValueInt(); IPD = GetValueInt(); TE = GetValueDouble(); NA = GetValueInt(); LAYER = GetValueInt(); IPBC = GetValueInt(); PP[0] = GetValueDouble(); PP[1] = GetValueDouble(); PP[2] = GetValueDouble(); // Generate atom coordinates GenerateLatis(); // Sort atoms by the z axis SortAtoms('Z'); // Find the periodic boundary limits if PBC is applied FindBoundaries(); } catch(exception& e) { cout << "Error in Input(): " << e.what() << endl; return false; } return true; } bool SolveCpu() { // Initialize some variables and define some factors MDS = 0; // Current md simulation step; int IPP=0; // Print counter double EPAV = 0; // Average potential energy double EKAV = 0; // Average kinetic energy double ETAV = 0; // Average total energy double SCFAV = 0; // Average scaling factor TCALAV = 0; // System temperature int IAV = 0; // Average counter int ISCA = 0; // Scaling counter double FFPR[MAX_ATOMS][3]; // Array to store forces from previous step // Calculate the initial potential energy of each atom and the initial force that each atom experiences ForceCpu(); // SET INITIAL VELOCITIES ACC. TO MAXWELL VEL. 
DISTRIBUTION MaxWell(); // Printing initially distributed velocities, potential energies, forces, total energy and temperature PrintInitial(); fileOut << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileOut << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileOut << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; fileEne << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileEne << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileEne << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; // Start Md Steps while(MDS < MDSL){ MDS++; IPP++; ISCA++; // Show status at each 100 steps if((MDS % 100) == 0) ShowStatus(); // Reposition the particles if PBC is applied if(IPBC != 0) Reposition(); // Calculate velocity and position of the particles using the velocity summed form of verlet algorithm (NVE MD velocity form) ForceCpu(); // Compute the positions at time step n+1 as: // ri(n+1)=ri(n)+hvi(n)+(1/2m)h2Fi(n) for(int i=0; i<NA; i++){ X[i] = X[i] + DT*VV[i][0] + (pow(DT,2)*FF[i][0]) / (2*RM); Y[i] = Y[i] + DT*VV[i][1] + (pow(DT,2)*FF[i][1]) / (2*RM); Z[i] = Z[i] + DT*VV[i][2] + (pow(DT,2)*FF[i][2]) / (2*RM); } // Store the forces at time step Fi(n) memcpy(FFPR, FF, NA*3*sizeof(double)); //for(int i=0; i<NA; i++){ // for(int j=0; j<3; j++){ // FFPR[i][j] = FF[i][j]; // } //} ForceCpu(); // Compute the velocities at time step n+1 as // vi(n+1)=vi(n)+(h/2m)(Fi(n+1)+Fi(n)) // Calculate the temperature that system reached by calculating the kinetic energy of each atom EKINA = 0; for(int i=0; i<NA; i++){ VV[i][0] += DT * (FF[i][0]+FFPR[i][0]) / (2*RM); VV[i][1] += DT * (FF[i][1]+FFPR[i][1]) / (2*RM); VV[i][2] += DT * (FF[i][2]+FFPR[i][2]) / (2*RM); EKINA += pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2); } EKINA *= RM; TCALC = EKINA / (3*NA*BK); // Calculate the scaling factor and scale the velocities SCFAC = sqrt(TE/TCALC); if(ISCA == ISCAL) { EKIN = 0; for(int i=0; i<NA; i++){ VV[i][0] *= SCFAC; VV[i][1] *= SCFAC; VV[i][2] *= SCFAC; EKIN += pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2); } ISCA = 0; EKIN *= RM; TCALC = EKIN / (3*NA*BK); } // Calculate total energy ETOT = EPOT + EKINA; // Calculate the averages of EPOT, EKINA, ETOT, SCFAC AND TCALC EPAV += EPOT; EKAV += EKINA; ETAV += ETOT; SCFAV += SCFAC; TCALAV += TCALC; IAV++; if(IAV < IAVL) continue; EPAV /= IAVL; EKAV /= IAVL; ETAV /= IAVL; SCFAV /= IAVL; TCALAV /= IAVL; // Print the averages fileOut << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; fileEne << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; // Periodic printing of coordinates if(IPP == IPPL){ PrintPeriodic(); IPP = 0; } IAV = 0; EPAV = 0; EKAV = 0; ETAV = 0; SCFAV = 0; TCALAV = 0; } // Md Steps Loop PrintFinal(); return true; } bool SolveCuda(int kernelStepCount) { // Initialize some variables and define some factors MDS = 0; // Current md simulation step; int IPP=0; // Print counter double EPAV = 0; // Average potential energy double EKAV = 0; // Average kinetic energy double ETAV = 0; // Average total energy double SCFAV = 0; // Average scaling factor TCALAV = 0; // System temperature int IAV = 0; // Average counter int ISCA = 0; // Scaling counter // Use floats for cuda int sizeNA = NA * sizeof(float); CuErr( hipHostMalloc(&h_X, sizeNA)); CuErr( hipHostMalloc(&h_Y, 
sizeNA)); CuErr( hipHostMalloc(&h_Z, sizeNA)); for(int i=0; i<NA; i++){ h_X[i] = (float)X[i]; h_Y[i] = (float)Y[i]; h_Z[i] = (float)Z[i]; } // Calculate the initial potential energy of each atom and the initial force that each atom experiences ForceCpu(); // SET INITIAL VELOCITIES ACC. TO MAXWELL VEL. DISTRIBUTION MaxWell(); // Printing initially distributed velocities, potential energies, forces, total energy and temperature PrintInitial(); fileOut << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileOut << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileOut << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; fileEne << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileEne << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileEne << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; // Start Md Steps while(MDS < MDSL){ MDS += kernelStepCount; IPP += kernelStepCount; ISCA += kernelStepCount; IAV += kernelStepCount; LaunchCudaKernel(kernelStepCount); // Show status at each 100 steps ShowStatus(); // Averages if(IAV >= IAVL) { EPAV /= IAVL; EKAV /= IAVL; ETAV /= IAVL; SCFAV /= IAVL; TCALAV /= IAVL; // Print the averages fileOut << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; fileEne << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; IAV = 0; EPAV = 0; EKAV = 0; ETAV = 0; SCFAV = 0; TCALAV = 0; } // Periodic printing of coordinates if(IPP >= IPPL){ PrintPeriodic(); IPP = 0; } } // Md Steps Loop // Used floats for cuda for(int i=0; i<NA; i++){ X[i] = h_X[i]; Y[i] = h_Y[i]; Z[i] = h_Z[i]; } PrintFinal(); return true; } bool CloseFiles() { fileInp.close(); fileOut.close(); fileEne.close(); filePic.close(); fileBs.close(); CuErr( hipHostFree(h_FFX)); CuErr( hipHostFree(h_FFY)); CuErr( hipHostFree(h_FFZ)); CuErr( hipHostFree(h_Params)); CuErr( hipFree(d_FFX)); CuErr( hipFree(d_FFY)); CuErr( hipFree(d_FFZ)); CuErr( hipFree(d_EE)); CuErr( hipFree(d_X)); CuErr( hipFree(d_Y)); CuErr( hipFree(d_Z)); CuErr( hipFree(d_Params)); return true; } void ShowStatus() { cout << "\rMDS Steps: " << MDS << " of " << MDSL; } string GetTime() { time_t rawtime; struct tm * timeinfo; char chars[100]; time ( &rawtime ); timeinfo = localtime ( &rawtime ); strftime (chars, 100, "%Y.%m.%d %H:%M:%S", timeinfo); string final = " DATE AND TIME: "; final += chars; return final; } void ForceCpu() { double E2 = 0; // Total energy double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2; double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ; double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ; int i, j; #pragma omp parallel for private(i,j,EPP,FX2,FY2,FZ2,RIJ,RIJ2,XIJ,YIJ,ZIJ,ARG1,ARG2,EXP1,EXP2,UIJ1,UIJ2,UIJ,FAC1,FAC2,FAC12,XRIJ,YRIJ,ZRIJ) reduction(+:E2) for(i=0; i<NA; i++) { EE[i] = 0; EPP = 0; //Forces that effect atoms indexed with i in all three axes FX2 = 0; FY2 = 0; FZ2 = 0; for(j=0; j<NA; j++) { if(i == j) continue; // Apply periodic boundaries and find distances between atom I and j. 
RIJ2 is square of RIJ Period(i, j, XIJ, YIJ, ZIJ, RIJ2, RIJ); // Calculate potential energy U(r) ARG1 = AL1*RIJ2; ARG2 = AL2*RIJ2; EXP1 = exp(-ARG1); EXP2 = exp(-ARG2); UIJ1 = A1*EXP1/(pow(RIJ,RL1)); UIJ2 = A2*EXP2/(pow(RIJ,RL2)); UIJ = D21*UIJ1 + D22*UIJ2; EPP += UIJ; // Calculate forces FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ); FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ); FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2; XRIJ = XIJ/RIJ; YRIJ = YIJ/RIJ; ZRIJ = ZIJ/RIJ; FX2 += FAC12*XRIJ; FY2 += FAC12*YRIJ; FZ2 += FAC12*ZRIJ; } FF[i][0] = -FX2; FF[i][1] = -FY2; FF[i][2] = -FZ2; EE[i] = EPP; E2 += EPP; //FFF[i] = sqrt(FF[i][0]*FF[i][0] + FF[i][1]*FF[i][1] + FF[i][2]*FF[i][2]); } EPOT = E2; } void LaunchCudaKernel(int numberOfSteps) { int sizeNA = NA * sizeof(float); // Pointers are global, allocating once is enough if(h_FFX == NULL){ CuErr( hipHostMalloc(&h_FFX, sizeNA)); CuErr( hipHostMalloc(&h_FFY, sizeNA)); CuErr( hipHostMalloc(&h_FFZ, sizeNA)); CuErr( hipMalloc(&d_FFX, sizeNA)); CuErr( hipMalloc(&d_FFY, sizeNA)); CuErr( hipMalloc(&d_FFZ, sizeNA)); CuErr( hipMalloc(&d_EE, sizeNA)); CuErr( hipMalloc(&d_X, sizeNA)); CuErr( hipMalloc(&d_Y, sizeNA)); CuErr( hipMalloc(&d_Z, sizeNA)); //hipChannelFormatDesc chanDouble = hipCreateChannelDesc<double>(); //CuErr( hipBindTexture(0, &texX, d_X, &chanDouble, sizeNA)); //CuErr( hipBindTexture(0, &texY, d_Y, &chanDouble, sizeNA)); //CuErr( hipBindTexture(0, &texZ, d_Z, &chanDouble, sizeNA)); } CuErr( hipMemcpy(d_X, h_X, sizeNA, hipMemcpyHostToDevice)); CuErr( hipMemcpy(d_Y, h_Y, sizeNA, hipMemcpyHostToDevice)); CuErr( hipMemcpy(d_Z, h_Z, sizeNA, hipMemcpyHostToDevice)); int blockSize = 32; int numBlocks = NA / blockSize + (NA % blockSize == 0 ? 0:1); hipLaunchKernelGGL(( mdKernel) , dim3(numBlocks), dim3(blockSize) , 0, 0, NA, d_FFX, d_FFY, d_FFZ, d_EE, d_X, d_Y, d_Z, IPBC, PP[0], PP[1], PP[2], AL1, AL2, A1, A2, RL1, RL2, D21, D22, PA[0], PA[1], PA[2], PB[0], PB[1], PB[2], PL[0], PL[1], PL[2]); CuErrC("kernelForce kernel execution failed"); CuErr( hipMemcpy(X, d_X, sizeNA, hipMemcpyDeviceToHost)); CuErr( hipMemcpy(Y, d_Y, sizeNA, hipMemcpyDeviceToHost)); CuErr( hipMemcpy(Z, d_Z, sizeNA, hipMemcpyDeviceToHost)); CuErr( hipMemcpy(h_FFX, d_FFX, sizeNA, hipMemcpyDeviceToHost)); CuErr( hipMemcpy(h_FFY, d_FFY, sizeNA, hipMemcpyDeviceToHost)); CuErr( hipMemcpy(h_FFZ, d_FFZ, sizeNA, hipMemcpyDeviceToHost)); CuErr( hipMemcpy(EE, d_EE, sizeNA, hipMemcpyDeviceToHost)); EPOT = 0; for(int i=0; i<NA; i++){ FF[i][0] = h_FFX[i]; FF[i][1] = h_FFY[i]; FF[i][2] = h_FFZ[i]; EPOT += EE[i]; } } void FindBoundaries() { if(IPBC == 0) return; for(int i=0; i<3; i++) PL[i] = PP[i] / 2.0; // Find smallest coordinates for X, Y and Z coordinates PA[0] = X[0]; PA[1] = Y[0]; PA[2] = Z[0]; for(int i=1; i<NN; i++) { if(PA[0] > X[i]) PA[0] = X[i]; if(PA[1] > Y[i]) PA[1] = Y[i]; if(PA[2] > Z[i]) PA[2] = Z[i]; } // Find ending coordinates of working system PB[0] = PA[0] + PP[0]; PB[1] = PA[1] + PP[1]; PB[2] = PA[2] + PP[2]; } // PRINTING OF POSITIONS, FORCES, AND ENERGIES void PrintCoordinatesForcesEnergy(){ fileOut << " I X Y Z FX FY FZ EE" << endl; fileOut << " ------ --------- ------------ ------------ ------------ ------------ ------------ ------------" << endl << endl; for(int i=0; i<NA; i++){ fileOut << setw(6) << i+1; fileOut << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << " "; fileOut << scientific << setw(13) << FF[i][0] << " " << setw(13) << FF[i][1] << " " << setw(13) << FF[i][2] << " " << setw(13) << EE[i]; fileOut << fixed << endl; } } void PrintInitial() { string str; 
fileInp.clear(); fileInp.seekg(0, ios::beg); if(PSilent == false) cout << "Simulation started" << endl; fileOut << "******************************************************************************************" << endl; fileOut << Title << endl; fileOut << "******************************************************************************************" << endl << endl; fileOut << GetTime() << endl << endl; tStart = clock(); getline(fileInp, str); getline(fileInp, str); fileOut << str << endl; getline(fileInp, str); fileOut << str << endl << endl; getline(fileInp, str); fileOut << " INITIAL COORDINATES:" << endl; for(int i=0; i<LAYER; i++){ getline(fileInp, str); fileOut << str << endl; } fileOut << "******************************************************************************************" << endl << endl; fileOut << " NUMBER OF MOVING ATOMS: NA= " << NA << endl; fileOut << " NUMBER OF TOTAL ATOMS: NN= " << NN << endl << endl; fileOut << " INITIAL COORDINATES OF ALL ATOMS: (X,Y,Z)" << endl << endl; for(int i=0; i<NN; i++){ fileOut << setw(5) << i+1 << " " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl; } fileOut << "******************************************************************************************" << endl << endl; fileOut << endl << " INITIAL COORDINATES, FORCES AND ENERGIES:" << endl << endl; PrintCoordinatesForcesEnergy(); fileOut << endl << scientific; fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC << " SCFAC=" << SCFAC << endl << endl << fixed; } void PrintPeriodic() { fileOut << endl << endl << " PERIODIC PRINTING OF COORDINATES, FORCES AND ENERGIES AT MDS: " << MDS << endl << endl; PrintCoordinatesForcesEnergy(); fileOut << endl << scientific; fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC; fileOut << " SCFAC=" << SCFAC << endl << endl << fixed; } void PrintFinal() { if(IPBC != 0) Reposition(); fileOut << endl << endl << " FINAL COORDINATES, FORCES AND ENERGIES:" << endl << endl; PrintCoordinatesForcesEnergy(); fileOut << endl << scientific; fileOut << " EPOT=" << EPOT << " EKINA=" << EKINA << " ETOT=" << ETOT << " TCALC=" << TCALC << endl << endl << fixed; PrintElapsedTime(); fileOut << " *************** END OF THE CALCULATION ***************"; WritePicFile(); WriteBsFile(); if(PSilent == false) cout << endl << "Simulation complete" << endl; } void PrintElapsedTime() { // Write current time fileOut << endl << GetTime() << endl << endl; // Calculate and write elapsed time tStop = clock(); float seconds = float(tStop - tStart)/CLOCKS_PER_SEC; int minutes = int(seconds/60); seconds -= minutes*60; int hours = minutes/60; minutes -= hours*60; fileOut << " ELAPSED TIME: " << hours << " HOURS " << minutes << " MINUTES " << seconds << " SECONDS" << endl << endl; } // RANDOM NUMBER GENERATOR, GENERATES RN IN THE INTERVAL (-1,1) double Randum(double U, double S) { U = 23*U + 0.21132486579; if((U-1.0) >= 0) U = U - int(U); if(U > 0.5) S = -S; U = U-int(U); return (S * U); } // DISTRUBUTES THE VELOCITIES FOR THE ATOMS FOR THE SPECIFIED // TEMPERATURE TE ACCORDING TO THE MAXWELL VELOCITY DISTRIBUTION void MaxWell() { double FAC1 = sqrt(3.0*BK*TE/RM); double U = 0.0; double S = 1.0; double VVX = 0.0; double VVY = 0.0; double VVZ = 0.0; double FAC2 = (2.0/3.0) * FAC1; FAC2 /= sqrt(3.0); // EQUATING Vmean TO FAC2 for(int i=0; i<NA; i++){ for(int j=0; j<3; j++){ VV[i][j] = (FAC2 - FAC2*Randum(U,S)); } } // CALCULATING AVERAGES double VVV = 0.0; for(int i=0; i<NA; i++){ VVX = VVX + VV[i][0]; VVY = VVY + VV[i][1]; VVZ = 
VVZ + VV[i][2]; } VVX /= NA; VVY /= NA; VVZ /= NA; VVV = VVX*VVX + VVY*VVY + VVZ*VVZ; double COSX = VVX / sqrt(VVV); double COSY = VVY / sqrt(VVV); double COSZ = VVZ / sqrt(VVV); // CALCULATING EKIN AND TEMPERATURE WRT THE CALCULATED Vmean EKIN = 0.5 * RM * (VVV * (9.0/4.0)); TCALC = EKIN / (1.5 * BK); // CALCULATING THE SCALING FACTOR SCFAC = sqrt(TE / TCALC); // REDISTRIBUTING THE INITIAL VELOCITIES WRT SCALING FACTOR VVV = sqrt(VVV); double VVXNEW = COSX * VVV * SCFAC; double VVYNEW = COSY * VVV * SCFAC; double VVZNEW = COSZ * VVV * SCFAC; double XSCALE = (VVXNEW-VVX); double YSCALE = (VVYNEW-VVY); double ZSCALE = (VVZNEW-VVZ); for(int i=0; i<NA; i++){ VV[i][0] += XSCALE; VV[i][1] += YSCALE; VV[i][2] += ZSCALE; VT[i] = pow(VV[i][0],2.0) + pow(VV[i][1],2) + pow(VV[i][2],2); } // CALCULATING AVERAGES OF SCALED VELOCITIES VVX = 0; VVY = 0; VVZ = 0; for(int i=0; i<NA; i++){ VVX += VV[i][0]; VVY += VV[i][1]; VVZ += VV[i][2]; } VVX /= NA; VVY /= NA; VVZ /= NA; // CALCULATING EKIN AND TEMPERATURE WRT THE SCALED Vmean VVV = VVX*VVX + VVY*VVY + VVZ*VVZ; EKIN = 0.5 * RM * (VVV * (9/4)); TCALC = EKIN / (1.5 * BK); ETOT = EPOT + EKIN; } // REPOSITIONS COORDINATES WHEN ANY MOVING ATOM CROSSES THE BOUNDARY. void Reposition() { double PAPL, H, B; if(PP[0] > 0){ PAPL = PA[0] + PL[0]; for(int i=0; i<NA; i++){ H = (X[i]-PAPL) / PL[0]; B = H - 2.0*int(H); X[i] = B*PL[0] + PAPL; } } if(PP[1] > 0){ PAPL = PA[1] + PL[1]; for(int i=0; i<NA; i++){ H = (Y[i]-PAPL) / PL[1]; B = H - 2.0*int(H); Y[i] = B*PL[1] + PAPL; } } if(PP[2] > 0){ PAPL = PA[2] + PL[2]; for(int i=0; i<NA; i++){ H = (Z[i]-PAPL) / PL[2]; B = H - 2.0*int(H); Z[i] = B*PL[2] + PAPL; } } } // Sorts atoms by the given axis void SortAtoms(char sortAxis) { double *sortArray; if(sortAxis == 'X') sortArray = X; else if(sortAxis == 'Y') sortArray = Y; else sortArray = Z; double tempX, tempY, tempZ; for (int i = 0; i < NA; i++) { for (int j = i+1; j < NA; j++) { if (sortArray[i] > sortArray[j]) { tempX = X[i]; tempY = Y[i]; tempZ = Z[i]; X[i] = X[j]; Y[i] = Y[j]; Z[i] = Z[j]; X[j] = tempX; Y[j] = tempY; Z[j] = tempZ; } } } } // Generates the atoms according to coordinates and repeat parameters from the input // In the input, the first 3 numbers are x,y,z coordinates, the second 3 numbers are unit cell lengths // and the last 3 numbers specify how many times to copy that atom in x,y,z direction void GenerateLatis() { // Skip the first line: (W(J,K),K=1,6),(NO(J,K),K=1,3) ReadLine(); NN = 0; for(int i=0; i<LAYER; i++) { double coordinateX = GetValueDouble(); double coordinateY = GetValueDouble(); double coordinateZ = GetValueDouble(); double unitCellLengthX = GetValueDouble(); double unitCellLengthY = GetValueDouble(); double unitCellLengthZ = GetValueDouble(); int multiplierX = GetValueInt(); int multiplierY = GetValueInt(); int multiplierZ = GetValueInt(); for (int iX = 0; iX < multiplierX; iX++) { for (int iY = 0; iY < multiplierY; iY++) { for (int iZ = 0; iZ < multiplierZ; iZ++) { double newCoordinateX = coordinateX + (iX * unitCellLengthX); double newCoordinateY = coordinateY + (iY * unitCellLengthY); double newCoordinateZ = coordinateZ + (iZ * unitCellLengthZ); X[NN] = newCoordinateX; Y[NN] = newCoordinateY; Z[NN] = newCoordinateZ; NN++; if(NN > MAX_ATOMS) { cout << "The number of atoms cannot exceed " << MAX_ATOMS << ". Stopping."; exit(1); } } } } } if (NN != NA) cout << "Warning: number of total atoms NN is different from number of moving atoms NA." 
<< endl; } string GetValue() { SkipSpace(); string val = ""; char c; do { fileInp.get(c); val += c; } while ((c != ' ') && (c != ',') && (c != '\n') && (c != '\r') && (fileInp.eof() != true)); val = val.substr(0, val.size() - 1); return val; } int GetValueInt() { string str = GetValue(); int result = 0; bool success = (stringstream(str) >> result); if(success == false) { cout << "Error converting input to integer. Stopping." << endl; exit(1); } return result; } double GetValueDouble() { string str = GetValue(); double result = 0; bool success = (stringstream(str) >> result); if(success == false) { cout << "Error converting input to double. Stopping." << endl; exit(1); } return result; } float GetValueFloat() { string str = GetValue(); float result = 0; bool success = (stringstream(str) >> result); if(success == false) { cout << "Error converting input to double. Stopping." << endl; exit(1); } return result; } string SkipSpace() { string val = ""; char c; do { fileInp.get(c); val += c; } while ((c == ' ') || (c == ',') || (c == '\n') || (c == '\r')); val = val.substr(0, val.size() - 1); fileInp.unget(); return val; } string ReadLine() { string line = ""; getline(fileInp, line); return line; } // Calculates interatomic distance between atoms I and J double Distance(int i, int j) { double XX = X[i] - X[j]; double YY = Y[i] - Y[j]; double ZZ = Z[i] - Z[j]; return XX*XX + YY*YY + ZZ*ZZ; } void WritePicFile() { double EB = EPOT / NA; filePic << " NN=" << NN << " NA=" << NA << " TOTPE=" << EPOT << " APEPP=" << EB << endl; for(int i=0; i<NA; i++){ filePic << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl; } } void WriteBsFile() { for(int i=0; i<NA; i++){ fileBs << "atom Au " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl; } fileBs << "" << endl; fileBs << "spec Au 0.50 1 0.75 0" << endl; // "atom size" "color codes rgb" fileBs << "" << endl; fileBs << "bonds Au Au 1.0 4.05 0.03 0.5 0.7 0.9" << endl; // Bond "min length" "max length" "line width" "color codes rgb" fileBs << "" << endl; fileBs << "tmat 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" << endl; fileBs << "dist 100.0" << endl; fileBs << "inc 1.0" << endl; fileBs << "scale 40.0" << endl; fileBs << "rfac 1.0" << endl; fileBs << "bfac 1.0" << endl; fileBs << "switches 1 0 1 0 0 1 1 0 0" << endl; fileBs << "" << endl; } // Apply periodic boundry condition and find distances between the two particles // Because of the periodic boundary, the distance may be the one in this working system or the particle in the adjacent system. void Period(int i, int j, double &XIJ, double &YIJ, double &ZIJ, double &RIJ2, double &RIJ) { XIJ = X[i] - X[j]; YIJ = Y[i] - Y[j]; ZIJ = Z[i] - Z[j]; double DD, ID; if(IPBC != 0){ if(PP[0] > 0){ DD = XIJ / PP[0]; ID = int(DD); XIJ = XIJ - PP[0]*(ID+int(2.0*(DD-ID))); } if(PP[1] > 0){ DD = YIJ / PP[1]; ID = int(DD); YIJ = YIJ - PP[1]*(ID+int(2.0*(DD-ID))); } if(PP[2] > 0){ DD = ZIJ / PP[2]; ID = int(DD); ZIJ = ZIJ - PP[2]*(ID+int(2.0*(DD-ID))); } } RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ; RIJ = sqrt(RIJ2); } // Check program starting parameters bool CheckParameters(int argc, char* argv[]) { PSilent = false; OnlyCpu = false; SetPriorityClass(GetCurrentProcess(),IDLE_PRIORITY_CLASS); for(int i=1; i<argc; i++) { string parameter = argv[i]; if(parameter == "-help"){ cout << "Use parameter '-s' for silent mode. No output will be given to the console." << endl; cout << "Use parameter '-cpu' for cpu calculations only (otherwise Nvidia graphics card required)." 
<< endl; return false; } else if(parameter == "-s"){ PSilent = true; } else if(parameter == "-cpu"){ OnlyCpu = true; cout << "-cpu option received. Will use only cpu for computations." << endl; } } return true; }
8f2706a22763dfaae3a247da1bfdabefbe2bcd5d.cu
#include "mdCuda.h" int main(int argc, char* argv[]) { if(CheckParameters(argc, argv) == false) return 0; if(OpenFiles() == false) return -1; if(Input() == false) return -1; if(OnlyCpu) SolveCpu(); else SolveCuda(100); CloseFiles(); return 0; } bool OpenFiles() { if(FileExists("mdse.out")) { cout << "mdse.out already exists. Enter 'y' to overwrite, 'n' to exit: "; string answer; cin >> answer; if(answer != "y"){ cout << "Stopping." << endl; return false; } } fileInp.open("mdse.inp"); if(fileInp.good() == false) { cout << "mdse.inp couldn't be opened for reading. Stopping." << endl; return false; } fileOut.open("mdse.out"); if(fileInp.good() == false) { cout << "mdse.out couldn't be opened for writing. Stopping." << endl; return false; } fileOut << fixed << setprecision(5); fileEne.open("mdse.ene"); if(fileEne.good() == false) { cout << "mdse.ene couldn't be opened for writing. Stopping." << endl; return false; } fileEne << fixed << setprecision(5); filePic.open("mdse.pic"); if(filePic.good() == false) { cout << "mdse.pic couldn't be opened for writing. Stopping." << endl; return false; } filePic << fixed << setprecision(5); fileBs.open("mdse.bs"); if(fileBs.good() == false) { cout << "mdse.bs couldn't be opened for writing. Stopping." << endl; return false; } fileBs << fixed << setprecision(5); return true; } bool FileExists(const string& filename) { struct stat buf; if (stat(filename.c_str(), &buf) != -1) { return true; } return false; } bool Input() { // Potential parameters for Cu /*RM = 63.546; DT = 0.9E-15; A1 = 110.766008; A2 = -46.1649783; RL1 = 2.09045946; RL2 = 1.49853083; AL1 = 0.394142248; AL2 = 0.207225507; D21 = 0.436092895; D22 = 0.245082238; */ // Potential parameters for Au RM=196.9665; DT=1.6E-15; A1=345.923364; A2=-38.9245908; RL1=1.0428923; RL2=1.05974062; AL1=0.750775965; AL2=0.229377368; D21=0.888911352; D22=0.254280292; double FACM = 0.103655772E-27; BK = 8.617385E-05; RM = RM * FACM; try { // Read the title Title = ReadLine(); // Skip the second line ReadLine(); // Read MDSL, IAVL, IPPL, ISCAL, IPD, TE, NA, LAYER, IPBC, PP(1), PP(2), PP(3) MDSL = GetValueInt(); IAVL = GetValueInt(); IPPL = GetValueInt(); ISCAL = GetValueInt(); IPD = GetValueInt(); TE = GetValueDouble(); NA = GetValueInt(); LAYER = GetValueInt(); IPBC = GetValueInt(); PP[0] = GetValueDouble(); PP[1] = GetValueDouble(); PP[2] = GetValueDouble(); // Generate atom coordinates GenerateLatis(); // Sort atoms by the z axis SortAtoms('Z'); // Find the periodic boundary limits if PBC is applied FindBoundaries(); } catch(exception& e) { cout << "Error in Input(): " << e.what() << endl; return false; } return true; } bool SolveCpu() { // Initialize some variables and define some factors MDS = 0; // Current md simulation step; int IPP=0; // Print counter double EPAV = 0; // Average potential energy double EKAV = 0; // Average kinetic energy double ETAV = 0; // Average total energy double SCFAV = 0; // Average scaling factor TCALAV = 0; // System temperature int IAV = 0; // Average counter int ISCA = 0; // Scaling counter double FFPR[MAX_ATOMS][3]; // Array to store forces from previous step // Calculate the initial potential energy of each atom and the initial force that each atom experiences ForceCpu(); // SET INITIAL VELOCITIES ACC. TO MAXWELL VEL. 
DISTRIBUTION MaxWell(); // Printing initially distributed velocities, potential energies, forces, total energy and temperature PrintInitial(); fileOut << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileOut << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileOut << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; fileEne << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileEne << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileEne << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; // Start Md Steps while(MDS < MDSL){ MDS++; IPP++; ISCA++; // Show status at each 100 steps if((MDS % 100) == 0) ShowStatus(); // Reposition the particles if PBC is applied if(IPBC != 0) Reposition(); // Calculate velocity and position of the particles using the velocity summed form of verlet algorithm (NVE MD velocity form) ForceCpu(); // Compute the positions at time step n+1 as: // ri(n+1)=ri(n)+hvi(n)+(1/2m)h2Fi(n) for(int i=0; i<NA; i++){ X[i] = X[i] + DT*VV[i][0] + (pow(DT,2)*FF[i][0]) / (2*RM); Y[i] = Y[i] + DT*VV[i][1] + (pow(DT,2)*FF[i][1]) / (2*RM); Z[i] = Z[i] + DT*VV[i][2] + (pow(DT,2)*FF[i][2]) / (2*RM); } // Store the forces at time step Fi(n) memcpy(FFPR, FF, NA*3*sizeof(double)); //for(int i=0; i<NA; i++){ // for(int j=0; j<3; j++){ // FFPR[i][j] = FF[i][j]; // } //} ForceCpu(); // Compute the velocities at time step n+1 as // vi(n+1)=vi(n)+(h/2m)(Fi(n+1)+Fi(n)) // Calculate the temperature that system reached by calculating the kinetic energy of each atom EKINA = 0; for(int i=0; i<NA; i++){ VV[i][0] += DT * (FF[i][0]+FFPR[i][0]) / (2*RM); VV[i][1] += DT * (FF[i][1]+FFPR[i][1]) / (2*RM); VV[i][2] += DT * (FF[i][2]+FFPR[i][2]) / (2*RM); EKINA += pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2); } EKINA *= RM; TCALC = EKINA / (3*NA*BK); // Calculate the scaling factor and scale the velocities SCFAC = sqrt(TE/TCALC); if(ISCA == ISCAL) { EKIN = 0; for(int i=0; i<NA; i++){ VV[i][0] *= SCFAC; VV[i][1] *= SCFAC; VV[i][2] *= SCFAC; EKIN += pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2); } ISCA = 0; EKIN *= RM; TCALC = EKIN / (3*NA*BK); } // Calculate total energy ETOT = EPOT + EKINA; // Calculate the averages of EPOT, EKINA, ETOT, SCFAC AND TCALC EPAV += EPOT; EKAV += EKINA; ETAV += ETOT; SCFAV += SCFAC; TCALAV += TCALC; IAV++; if(IAV < IAVL) continue; EPAV /= IAVL; EKAV /= IAVL; ETAV /= IAVL; SCFAV /= IAVL; TCALAV /= IAVL; // Print the averages fileOut << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; fileEne << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; // Periodic printing of coordinates if(IPP == IPPL){ PrintPeriodic(); IPP = 0; } IAV = 0; EPAV = 0; EKAV = 0; ETAV = 0; SCFAV = 0; TCALAV = 0; } // Md Steps Loop PrintFinal(); return true; } bool SolveCuda(int kernelStepCount) { // Initialize some variables and define some factors MDS = 0; // Current md simulation step; int IPP=0; // Print counter double EPAV = 0; // Average potential energy double EKAV = 0; // Average kinetic energy double ETAV = 0; // Average total energy double SCFAV = 0; // Average scaling factor TCALAV = 0; // System temperature int IAV = 0; // Average counter int ISCA = 0; // Scaling counter // Use floats for cuda int sizeNA = NA * sizeof(float); CuErr( cudaMallocHost(&h_X, sizeNA)); CuErr( cudaMallocHost(&h_Y, 
sizeNA)); CuErr( cudaMallocHost(&h_Z, sizeNA)); for(int i=0; i<NA; i++){ h_X[i] = (float)X[i]; h_Y[i] = (float)Y[i]; h_Z[i] = (float)Z[i]; } // Calculate the initial potential energy of each atom and the initial force that each atom experiences ForceCpu(); // SET INITIAL VELOCITIES ACC. TO MAXWELL VEL. DISTRIBUTION MaxWell(); // Printing initially distributed velocities, potential energies, forces, total energy and temperature PrintInitial(); fileOut << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileOut << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileOut << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; fileEne << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl; fileEne << "# MDS EPAV EKAV ETAV TCALAV" << endl; fileEne << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl; // Start Md Steps while(MDS < MDSL){ MDS += kernelStepCount; IPP += kernelStepCount; ISCA += kernelStepCount; IAV += kernelStepCount; LaunchCudaKernel(kernelStepCount); // Show status at each 100 steps ShowStatus(); // Averages if(IAV >= IAVL) { EPAV /= IAVL; EKAV /= IAVL; ETAV /= IAVL; SCFAV /= IAVL; TCALAV /= IAVL; // Print the averages fileOut << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; fileEne << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed; IAV = 0; EPAV = 0; EKAV = 0; ETAV = 0; SCFAV = 0; TCALAV = 0; } // Periodic printing of coordinates if(IPP >= IPPL){ PrintPeriodic(); IPP = 0; } } // Md Steps Loop // Used floats for cuda for(int i=0; i<NA; i++){ X[i] = h_X[i]; Y[i] = h_Y[i]; Z[i] = h_Z[i]; } PrintFinal(); return true; } bool CloseFiles() { fileInp.close(); fileOut.close(); fileEne.close(); filePic.close(); fileBs.close(); CuErr( cudaFreeHost(h_FFX)); CuErr( cudaFreeHost(h_FFY)); CuErr( cudaFreeHost(h_FFZ)); CuErr( cudaFreeHost(h_Params)); CuErr( cudaFree(d_FFX)); CuErr( cudaFree(d_FFY)); CuErr( cudaFree(d_FFZ)); CuErr( cudaFree(d_EE)); CuErr( cudaFree(d_X)); CuErr( cudaFree(d_Y)); CuErr( cudaFree(d_Z)); CuErr( cudaFree(d_Params)); return true; } void ShowStatus() { cout << "\rMDS Steps: " << MDS << " of " << MDSL; } string GetTime() { time_t rawtime; struct tm * timeinfo; char chars[100]; time ( &rawtime ); timeinfo = localtime ( &rawtime ); strftime (chars, 100, "%Y.%m.%d %H:%M:%S", timeinfo); string final = " DATE AND TIME: "; final += chars; return final; } void ForceCpu() { double E2 = 0; // Total energy double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2; double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ; double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ; int i, j; #pragma omp parallel for private(i,j,EPP,FX2,FY2,FZ2,RIJ,RIJ2,XIJ,YIJ,ZIJ,ARG1,ARG2,EXP1,EXP2,UIJ1,UIJ2,UIJ,FAC1,FAC2,FAC12,XRIJ,YRIJ,ZRIJ) reduction(+:E2) for(i=0; i<NA; i++) { EE[i] = 0; EPP = 0; //Forces that effect atoms indexed with i in all three axes FX2 = 0; FY2 = 0; FZ2 = 0; for(j=0; j<NA; j++) { if(i == j) continue; // Apply periodic boundaries and find distances between atom I and j. 
RIJ2 is square of RIJ Period(i, j, XIJ, YIJ, ZIJ, RIJ2, RIJ); // Calculate potential energy U(r) ARG1 = AL1*RIJ2; ARG2 = AL2*RIJ2; EXP1 = exp(-ARG1); EXP2 = exp(-ARG2); UIJ1 = A1*EXP1/(pow(RIJ,RL1)); UIJ2 = A2*EXP2/(pow(RIJ,RL2)); UIJ = D21*UIJ1 + D22*UIJ2; EPP += UIJ; // Calculate forces FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ); FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ); FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2; XRIJ = XIJ/RIJ; YRIJ = YIJ/RIJ; ZRIJ = ZIJ/RIJ; FX2 += FAC12*XRIJ; FY2 += FAC12*YRIJ; FZ2 += FAC12*ZRIJ; } FF[i][0] = -FX2; FF[i][1] = -FY2; FF[i][2] = -FZ2; EE[i] = EPP; E2 += EPP; //FFF[i] = sqrt(FF[i][0]*FF[i][0] + FF[i][1]*FF[i][1] + FF[i][2]*FF[i][2]); } EPOT = E2; } void LaunchCudaKernel(int numberOfSteps) { int sizeNA = NA * sizeof(float); // Pointers are global, allocating once is enough if(h_FFX == NULL){ CuErr( cudaMallocHost(&h_FFX, sizeNA)); CuErr( cudaMallocHost(&h_FFY, sizeNA)); CuErr( cudaMallocHost(&h_FFZ, sizeNA)); CuErr( cudaMalloc(&d_FFX, sizeNA)); CuErr( cudaMalloc(&d_FFY, sizeNA)); CuErr( cudaMalloc(&d_FFZ, sizeNA)); CuErr( cudaMalloc(&d_EE, sizeNA)); CuErr( cudaMalloc(&d_X, sizeNA)); CuErr( cudaMalloc(&d_Y, sizeNA)); CuErr( cudaMalloc(&d_Z, sizeNA)); //cudaChannelFormatDesc chanDouble = cudaCreateChannelDesc<double>(); //CuErr( cudaBindTexture(0, &texX, d_X, &chanDouble, sizeNA)); //CuErr( cudaBindTexture(0, &texY, d_Y, &chanDouble, sizeNA)); //CuErr( cudaBindTexture(0, &texZ, d_Z, &chanDouble, sizeNA)); } CuErr( cudaMemcpy(d_X, h_X, sizeNA, cudaMemcpyHostToDevice)); CuErr( cudaMemcpy(d_Y, h_Y, sizeNA, cudaMemcpyHostToDevice)); CuErr( cudaMemcpy(d_Z, h_Z, sizeNA, cudaMemcpyHostToDevice)); int blockSize = 32; int numBlocks = NA / blockSize + (NA % blockSize == 0 ? 0:1); mdKernel <<< numBlocks, blockSize >>> (NA, d_FFX, d_FFY, d_FFZ, d_EE, d_X, d_Y, d_Z, IPBC, PP[0], PP[1], PP[2], AL1, AL2, A1, A2, RL1, RL2, D21, D22, PA[0], PA[1], PA[2], PB[0], PB[1], PB[2], PL[0], PL[1], PL[2]); CuErrC("kernelForce kernel execution failed"); CuErr( cudaMemcpy(X, d_X, sizeNA, cudaMemcpyDeviceToHost)); CuErr( cudaMemcpy(Y, d_Y, sizeNA, cudaMemcpyDeviceToHost)); CuErr( cudaMemcpy(Z, d_Z, sizeNA, cudaMemcpyDeviceToHost)); CuErr( cudaMemcpy(h_FFX, d_FFX, sizeNA, cudaMemcpyDeviceToHost)); CuErr( cudaMemcpy(h_FFY, d_FFY, sizeNA, cudaMemcpyDeviceToHost)); CuErr( cudaMemcpy(h_FFZ, d_FFZ, sizeNA, cudaMemcpyDeviceToHost)); CuErr( cudaMemcpy(EE, d_EE, sizeNA, cudaMemcpyDeviceToHost)); EPOT = 0; for(int i=0; i<NA; i++){ FF[i][0] = h_FFX[i]; FF[i][1] = h_FFY[i]; FF[i][2] = h_FFZ[i]; EPOT += EE[i]; } } void FindBoundaries() { if(IPBC == 0) return; for(int i=0; i<3; i++) PL[i] = PP[i] / 2.0; // Find smallest coordinates for X, Y and Z coordinates PA[0] = X[0]; PA[1] = Y[0]; PA[2] = Z[0]; for(int i=1; i<NN; i++) { if(PA[0] > X[i]) PA[0] = X[i]; if(PA[1] > Y[i]) PA[1] = Y[i]; if(PA[2] > Z[i]) PA[2] = Z[i]; } // Find ending coordinates of working system PB[0] = PA[0] + PP[0]; PB[1] = PA[1] + PP[1]; PB[2] = PA[2] + PP[2]; } // PRINTING OF POSITIONS, FORCES, AND ENERGIES void PrintCoordinatesForcesEnergy(){ fileOut << " I X Y Z FX FY FZ EE" << endl; fileOut << " ------ --------- ------------ ------------ ------------ ------------ ------------ ------------" << endl << endl; for(int i=0; i<NA; i++){ fileOut << setw(6) << i+1; fileOut << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << " "; fileOut << scientific << setw(13) << FF[i][0] << " " << setw(13) << FF[i][1] << " " << setw(13) << FF[i][2] << " " << setw(13) << EE[i]; fileOut << fixed << endl; } } void PrintInitial() { string str; 
fileInp.clear(); fileInp.seekg(0, ios::beg); if(PSilent == false) cout << "Simulation started" << endl; fileOut << "******************************************************************************************" << endl; fileOut << Title << endl; fileOut << "******************************************************************************************" << endl << endl; fileOut << GetTime() << endl << endl; tStart = clock(); getline(fileInp, str); getline(fileInp, str); fileOut << str << endl; getline(fileInp, str); fileOut << str << endl << endl; getline(fileInp, str); fileOut << " INITIAL COORDINATES:" << endl; for(int i=0; i<LAYER; i++){ getline(fileInp, str); fileOut << str << endl; } fileOut << "******************************************************************************************" << endl << endl; fileOut << " NUMBER OF MOVING ATOMS: NA= " << NA << endl; fileOut << " NUMBER OF TOTAL ATOMS: NN= " << NN << endl << endl; fileOut << " INITIAL COORDINATES OF ALL ATOMS: (X,Y,Z)" << endl << endl; for(int i=0; i<NN; i++){ fileOut << setw(5) << i+1 << " " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl; } fileOut << "******************************************************************************************" << endl << endl; fileOut << endl << " INITIAL COORDINATES, FORCES AND ENERGIES:" << endl << endl; PrintCoordinatesForcesEnergy(); fileOut << endl << scientific; fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC << " SCFAC=" << SCFAC << endl << endl << fixed; } void PrintPeriodic() { fileOut << endl << endl << " PERIODIC PRINTING OF COORDINATES, FORCES AND ENERGIES AT MDS: " << MDS << endl << endl; PrintCoordinatesForcesEnergy(); fileOut << endl << scientific; fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC; fileOut << " SCFAC=" << SCFAC << endl << endl << fixed; } void PrintFinal() { if(IPBC != 0) Reposition(); fileOut << endl << endl << " FINAL COORDINATES, FORCES AND ENERGIES:" << endl << endl; PrintCoordinatesForcesEnergy(); fileOut << endl << scientific; fileOut << " EPOT=" << EPOT << " EKINA=" << EKINA << " ETOT=" << ETOT << " TCALC=" << TCALC << endl << endl << fixed; PrintElapsedTime(); fileOut << " *************** END OF THE CALCULATION ***************"; WritePicFile(); WriteBsFile(); if(PSilent == false) cout << endl << "Simulation complete" << endl; } void PrintElapsedTime() { // Write current time fileOut << endl << GetTime() << endl << endl; // Calculate and write elapsed time tStop = clock(); float seconds = float(tStop - tStart)/CLOCKS_PER_SEC; int minutes = int(seconds/60); seconds -= minutes*60; int hours = minutes/60; minutes -= hours*60; fileOut << " ELAPSED TIME: " << hours << " HOURS " << minutes << " MINUTES " << seconds << " SECONDS" << endl << endl; } // RANDOM NUMBER GENERATOR, GENERATES RN IN THE INTERVAL (-1,1) double Randum(double U, double S) { U = 23*U + 0.21132486579; if((U-1.0) >= 0) U = U - int(U); if(U > 0.5) S = -S; U = U-int(U); return (S * U); } // DISTRUBUTES THE VELOCITIES FOR THE ATOMS FOR THE SPECIFIED // TEMPERATURE TE ACCORDING TO THE MAXWELL VELOCITY DISTRIBUTION void MaxWell() { double FAC1 = sqrt(3.0*BK*TE/RM); double U = 0.0; double S = 1.0; double VVX = 0.0; double VVY = 0.0; double VVZ = 0.0; double FAC2 = (2.0/3.0) * FAC1; FAC2 /= sqrt(3.0); // EQUATING Vmean TO FAC2 for(int i=0; i<NA; i++){ for(int j=0; j<3; j++){ VV[i][j] = (FAC2 - FAC2*Randum(U,S)); } } // CALCULATING AVERAGES double VVV = 0.0; for(int i=0; i<NA; i++){ VVX = VVX + VV[i][0]; VVY = VVY + VV[i][1]; VVZ = 
VVZ + VV[i][2]; } VVX /= NA; VVY /= NA; VVZ /= NA; VVV = VVX*VVX + VVY*VVY + VVZ*VVZ; double COSX = VVX / sqrt(VVV); double COSY = VVY / sqrt(VVV); double COSZ = VVZ / sqrt(VVV); // CALCULATING EKIN AND TEMPERATURE WRT THE CALCULATED Vmean EKIN = 0.5 * RM * (VVV * (9.0/4.0)); TCALC = EKIN / (1.5 * BK); // CALCULATING THE SCALING FACTOR SCFAC = sqrt(TE / TCALC); // REDISTRIBUTING THE INITIAL VELOCITIES WRT SCALING FACTOR VVV = sqrt(VVV); double VVXNEW = COSX * VVV * SCFAC; double VVYNEW = COSY * VVV * SCFAC; double VVZNEW = COSZ * VVV * SCFAC; double XSCALE = (VVXNEW-VVX); double YSCALE = (VVYNEW-VVY); double ZSCALE = (VVZNEW-VVZ); for(int i=0; i<NA; i++){ VV[i][0] += XSCALE; VV[i][1] += YSCALE; VV[i][2] += ZSCALE; VT[i] = pow(VV[i][0],2.0) + pow(VV[i][1],2) + pow(VV[i][2],2); } // CALCULATING AVERAGES OF SCALED VELOCITIES VVX = 0; VVY = 0; VVZ = 0; for(int i=0; i<NA; i++){ VVX += VV[i][0]; VVY += VV[i][1]; VVZ += VV[i][2]; } VVX /= NA; VVY /= NA; VVZ /= NA; // CALCULATING EKIN AND TEMPERATURE WRT THE SCALED Vmean VVV = VVX*VVX + VVY*VVY + VVZ*VVZ; EKIN = 0.5 * RM * (VVV * (9/4)); TCALC = EKIN / (1.5 * BK); ETOT = EPOT + EKIN; } // REPOSITIONS COORDINATES WHEN ANY MOVING ATOM CROSSES THE BOUNDARY. void Reposition() { double PAPL, H, B; if(PP[0] > 0){ PAPL = PA[0] + PL[0]; for(int i=0; i<NA; i++){ H = (X[i]-PAPL) / PL[0]; B = H - 2.0*int(H); X[i] = B*PL[0] + PAPL; } } if(PP[1] > 0){ PAPL = PA[1] + PL[1]; for(int i=0; i<NA; i++){ H = (Y[i]-PAPL) / PL[1]; B = H - 2.0*int(H); Y[i] = B*PL[1] + PAPL; } } if(PP[2] > 0){ PAPL = PA[2] + PL[2]; for(int i=0; i<NA; i++){ H = (Z[i]-PAPL) / PL[2]; B = H - 2.0*int(H); Z[i] = B*PL[2] + PAPL; } } } // Sorts atoms by the given axis void SortAtoms(char sortAxis) { double *sortArray; if(sortAxis == 'X') sortArray = X; else if(sortAxis == 'Y') sortArray = Y; else sortArray = Z; double tempX, tempY, tempZ; for (int i = 0; i < NA; i++) { for (int j = i+1; j < NA; j++) { if (sortArray[i] > sortArray[j]) { tempX = X[i]; tempY = Y[i]; tempZ = Z[i]; X[i] = X[j]; Y[i] = Y[j]; Z[i] = Z[j]; X[j] = tempX; Y[j] = tempY; Z[j] = tempZ; } } } } // Generates the atoms according to coordinates and repeat parameters from the input // In the input, the first 3 numbers are x,y,z coordinates, the second 3 numbers are unit cell lengths // and the last 3 numbers specify how many times to copy that atom in x,y,z direction void GenerateLatis() { // Skip the first line: (W(J,K),K=1,6),(NO(J,K),K=1,3) ReadLine(); NN = 0; for(int i=0; i<LAYER; i++) { double coordinateX = GetValueDouble(); double coordinateY = GetValueDouble(); double coordinateZ = GetValueDouble(); double unitCellLengthX = GetValueDouble(); double unitCellLengthY = GetValueDouble(); double unitCellLengthZ = GetValueDouble(); int multiplierX = GetValueInt(); int multiplierY = GetValueInt(); int multiplierZ = GetValueInt(); for (int iX = 0; iX < multiplierX; iX++) { for (int iY = 0; iY < multiplierY; iY++) { for (int iZ = 0; iZ < multiplierZ; iZ++) { double newCoordinateX = coordinateX + (iX * unitCellLengthX); double newCoordinateY = coordinateY + (iY * unitCellLengthY); double newCoordinateZ = coordinateZ + (iZ * unitCellLengthZ); X[NN] = newCoordinateX; Y[NN] = newCoordinateY; Z[NN] = newCoordinateZ; NN++; if(NN > MAX_ATOMS) { cout << "The number of atoms cannot exceed " << MAX_ATOMS << ". Stopping."; exit(1); } } } } } if (NN != NA) cout << "Warning: number of total atoms NN is different from number of moving atoms NA." 
<< endl; } string GetValue() { SkipSpace(); string val = ""; char c; do { fileInp.get(c); val += c; } while ((c != ' ') && (c != ',') && (c != '\n') && (c != '\r') && (fileInp.eof() != true)); val = val.substr(0, val.size() - 1); return val; } int GetValueInt() { string str = GetValue(); int result = 0; bool success = (stringstream(str) >> result); if(success == false) { cout << "Error converting input to integer. Stopping." << endl; exit(1); } return result; } double GetValueDouble() { string str = GetValue(); double result = 0; bool success = (stringstream(str) >> result); if(success == false) { cout << "Error converting input to double. Stopping." << endl; exit(1); } return result; } float GetValueFloat() { string str = GetValue(); float result = 0; bool success = (stringstream(str) >> result); if(success == false) { cout << "Error converting input to double. Stopping." << endl; exit(1); } return result; } string SkipSpace() { string val = ""; char c; do { fileInp.get(c); val += c; } while ((c == ' ') || (c == ',') || (c == '\n') || (c == '\r')); val = val.substr(0, val.size() - 1); fileInp.unget(); return val; } string ReadLine() { string line = ""; getline(fileInp, line); return line; } // Calculates interatomic distance between atoms I and J double Distance(int i, int j) { double XX = X[i] - X[j]; double YY = Y[i] - Y[j]; double ZZ = Z[i] - Z[j]; return XX*XX + YY*YY + ZZ*ZZ; } void WritePicFile() { double EB = EPOT / NA; filePic << " NN=" << NN << " NA=" << NA << " TOTPE=" << EPOT << " APEPP=" << EB << endl; for(int i=0; i<NA; i++){ filePic << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl; } } void WriteBsFile() { for(int i=0; i<NA; i++){ fileBs << "atom Au " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl; } fileBs << "" << endl; fileBs << "spec Au 0.50 1 0.75 0" << endl; // "atom size" "color codes rgb" fileBs << "" << endl; fileBs << "bonds Au Au 1.0 4.05 0.03 0.5 0.7 0.9" << endl; // Bond "min length" "max length" "line width" "color codes rgb" fileBs << "" << endl; fileBs << "tmat 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" << endl; fileBs << "dist 100.0" << endl; fileBs << "inc 1.0" << endl; fileBs << "scale 40.0" << endl; fileBs << "rfac 1.0" << endl; fileBs << "bfac 1.0" << endl; fileBs << "switches 1 0 1 0 0 1 1 0 0" << endl; fileBs << "" << endl; } // Apply periodic boundry condition and find distances between the two particles // Because of the periodic boundary, the distance may be the one in this working system or the particle in the adjacent system. void Period(int i, int j, double &XIJ, double &YIJ, double &ZIJ, double &RIJ2, double &RIJ) { XIJ = X[i] - X[j]; YIJ = Y[i] - Y[j]; ZIJ = Z[i] - Z[j]; double DD, ID; if(IPBC != 0){ if(PP[0] > 0){ DD = XIJ / PP[0]; ID = int(DD); XIJ = XIJ - PP[0]*(ID+int(2.0*(DD-ID))); } if(PP[1] > 0){ DD = YIJ / PP[1]; ID = int(DD); YIJ = YIJ - PP[1]*(ID+int(2.0*(DD-ID))); } if(PP[2] > 0){ DD = ZIJ / PP[2]; ID = int(DD); ZIJ = ZIJ - PP[2]*(ID+int(2.0*(DD-ID))); } } RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ; RIJ = sqrt(RIJ2); } // Check program starting parameters bool CheckParameters(int argc, char* argv[]) { PSilent = false; OnlyCpu = false; SetPriorityClass(GetCurrentProcess(),IDLE_PRIORITY_CLASS); for(int i=1; i<argc; i++) { string parameter = argv[i]; if(parameter == "-help"){ cout << "Use parameter '-s' for silent mode. No output will be given to the console." << endl; cout << "Use parameter '-cpu' for cpu calculations only (otherwise Nvidia graphics card required)." 
<< endl; return false; } else if(parameter == "-s"){ PSilent = true; } else if(parameter == "-cpu"){ OnlyCpu = true; cout << "-cpu option received. Will use only cpu for computations." << endl; } } return true; }
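The mdCuda pair above ends here. As a point of reference for this and the following pairs: hipify rewrites allocation, copies and launches mechanically, so cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree and a <<<grid, block>>> launch becomes hipLaunchKernelGGL. The sketch below is a minimal, self-contained illustration of that mapping; the vecAdd kernel and every name in it are invented for the example and do not come from the files above.

#include <cuda_runtime.h>

__global__ void vecAdd(const float* a, const float* b, float* c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

void runVecAdd(const float* hA, const float* hB, float* hC, int n) {
    size_t bytes = n * sizeof(float);
    float *dA, *dB, *dC;
    cudaMalloc(&dA, bytes);                                // HIP: hipMalloc(&dA, bytes)
    cudaMalloc(&dB, bytes);
    cudaMalloc(&dC, bytes);
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);     // HIP: hipMemcpy(..., hipMemcpyHostToDevice)
    cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice);
    int block = 256;
    int grid = (n + block - 1) / block;
    vecAdd<<<grid, block>>>(dA, dB, dC, n);                // HIP: hipLaunchKernelGGL(vecAdd, dim3(grid), dim3(block), 0, 0, dA, dB, dC, n)
    cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);     // HIP: hipMemcpy(..., hipMemcpyDeviceToHost)
    cudaFree(dA); cudaFree(dB); cudaFree(dC);              // HIP: hipFree(...)
}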
b13f496aff1b73b8582c1269c63765418cc3d036.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Actually, there are no rounding errors due to results being accumulated in an arbitrary order..
// Therefore EPSILON = 0.0f is OK
#define EPSILON 0.001f
#define EPSILOND 0.0000001

extern "C" __global__ void compare(float *C, int *faultyElems, size_t iters) {
    size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
    size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
                     gridDim.x*blockDim.x +                 // W
                     blockIdx.x*blockDim.x + threadIdx.x;   // X

    int myFaulty = 0;
    for (size_t i = 1; i < iters; ++i)
        if (fabsf(C[myIndex] - C[myIndex + i*iterStep]) > EPSILON)
            myFaulty++;

    atomicAdd(faultyElems, myFaulty);
}

extern "C" __global__ void compareD(double *C, int *faultyElems, size_t iters) {
    size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
    size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
                     gridDim.x*blockDim.x +                 // W
                     blockIdx.x*blockDim.x + threadIdx.x;   // X

    int myFaulty = 0;
    for (size_t i = 1; i < iters; ++i)
        if (fabs(C[myIndex] - C[myIndex + i*iterStep]) > EPSILOND)
            myFaulty++;

    atomicAdd(faultyElems, myFaulty);
}
b13f496aff1b73b8582c1269c63765418cc3d036.cu
// Actually, there are no rounding errors due to results being accumulated in an arbitrary order..
// Therefore EPSILON = 0.0f is OK
#define EPSILON 0.001f
#define EPSILOND 0.0000001

extern "C" __global__ void compare(float *C, int *faultyElems, size_t iters) {
    size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
    size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
                     gridDim.x*blockDim.x +                 // W
                     blockIdx.x*blockDim.x + threadIdx.x;   // X

    int myFaulty = 0;
    for (size_t i = 1; i < iters; ++i)
        if (fabsf(C[myIndex] - C[myIndex + i*iterStep]) > EPSILON)
            myFaulty++;

    atomicAdd(faultyElems, myFaulty);
}

extern "C" __global__ void compareD(double *C, int *faultyElems, size_t iters) {
    size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
    size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
                     gridDim.x*blockDim.x +                 // W
                     blockIdx.x*blockDim.x + threadIdx.x;   // X

    int myFaulty = 0;
    for (size_t i = 1; i < iters; ++i)
        if (fabs(C[myIndex] - C[myIndex + i*iterStep]) > EPSILOND)
            myFaulty++;

    atomicAdd(faultyElems, myFaulty);
}
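In this pair the device code is byte-for-byte identical: threadIdx/blockIdx, fabsf/fabs and atomicAdd exist under both runtimes, so hipify only prepends its banner comment and the hip/hip_runtime.h include. A hypothetical host harness for the compare kernel is sketched below; the buffer layout (iters consecutive result tiles of grid*block elements each) and every name are assumptions for illustration, not code from the gpu-burn sources.

#include <cuda_runtime.h>
#include <cstddef>

extern "C" __global__ void compare(float *C, int *faultyElems, size_t iters);

// Counts result elements that differ across the iters tiles stored in dC.
// dC is assumed to already hold iters * grid.x*grid.y*block.x*block.y floats on the device.
int countFaultyElems(float* dC, size_t iters, dim3 grid, dim3 block) {
    int *dFaulty = nullptr, hFaulty = 0;
    cudaMalloc(&dFaulty, sizeof(int));                                   // HIP: hipMalloc
    cudaMemcpy(dFaulty, &hFaulty, sizeof(int), cudaMemcpyHostToDevice);  // HIP: hipMemcpy
    compare<<<grid, block>>>(dC, dFaulty, iters);                        // HIP: hipLaunchKernelGGL(compare, grid, block, 0, 0, dC, dFaulty, iters)
    cudaMemcpy(&hFaulty, dFaulty, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dFaulty);                                                   // HIP: hipFree
    return hFaulty;
}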
03060617efe181c6df40eedbc17c2178b9a2d2f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 07.03.2019 // #include <ops/declarable/helpers/gather.h> #include <numeric> #include <PointersManager.h> #include <ShapeUtils.h> namespace nd4j { namespace ops { namespace helpers { template<typename X, typename Y> __global__ static void gatherCudaLinearKernel(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { __shared__ const X* x; __shared__ const Y* y; __shared__ X* z; __shared__ Nd4jLong xLen, yLen, zLen; if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx); z = reinterpret_cast<X*>(vz); y = reinterpret_cast<const Y *>(vy); xLen = shape::length(xShapeInfo); yLen = shape::length(yShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); //const Nd4jLong zLen = shape::length(zShapeInfo); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int j = start; j < zLen; j += step) { auto zIndex = shape::getIndexOffset(j, zShapeInfo, zLen); auto yIndex = shape::getIndexOffset(j, yShapeInfo, yLen); auto xIndex = shape::getIndexOffset(y[yIndex], xShapeInfo, xLen); //printf("%lld , %lld\n", zIndex, xIndex); z[zIndex] = x[xIndex]; } } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void gatherCuda(const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { const Y* y = reinterpret_cast<const Y*>(vy); __shared__ const X* x; __shared__ X* z; const Nd4jLong len = shape::length(xShapeInfo); //const Nd4jLong zLen = shape::length(zShapeInfo); for (int i = blockIdx.x; i < numOfSubArrs; i += gridDim.x) { if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx) + xOffsets[y[shape::getIndexOffset(i, yShapeInfo, numOfSubArrs)]]; z = reinterpret_cast<X*>(vz) + zOffsets[i]; } __syncthreads(); for (int j = threadIdx.x; j < len; j += blockDim.x) { auto zIndex = shape::getIndexOffset(j, zShapeInfo, len); auto xIndex = shape::getIndexOffset(j, xShapeInfo, len); //printf("%lld , %lld\n", zIndex, xIndex); z[zIndex] = x[xIndex]; } __syncthreads(); } } template<typename X, typename Y> __host__ static void gatherCudaLinear(const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { hipLaunchKernelGGL(( gatherCudaLinearKernel<X,Y>), dim3(128), dim3(256), 1024, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } ////////////////////////////////////////////////////////////////////// template<typename 
X, typename Y> __host__ static void gatherCudaLauncher(const hipStream_t *stream, const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { hipLaunchKernelGGL(( gatherCuda<X,Y>), dim3(numOfSubArrs), dim3(MAX_NUM_THREADS), 1024, *stream, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, vz, zShapeInfo, zOffsets); } ////////////////////////////////////////////////////////////////////// void gather(nd4j::LaunchContext * context, const NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) { const int inputRank = input->rankOf(); int axis = intArgs.size() > 0 ? intArgs[0] : 0; if(axis < 0) axis += inputRank; const int numOfIntArgs = intArgs.size(); if (indices == nullptr && numOfIntArgs == 2) { // scalar case output->assign((*input)(intArgs[1], {axis})); } else if (indices != nullptr && indices->isScalar()) { if(input->rankOf() <= 1) { //For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar auto idx = indices->e<Nd4jLong>(0); auto scalarNDArray = input->e(idx); output->assign(scalarNDArray); } else { NDArray inSubArr = (*input)(indices->e<Nd4jLong>(0), {axis}); output->assign(inSubArr); } } else { NDArray* pIndices = const_cast<NDArray*>(indices); if(indices == nullptr) pIndices = new NDArray(input->ordering(), {numOfIntArgs-1}, std::vector<double>(intArgs.begin() + 1, intArgs.end()), DataType::INT64, input->getContext()); std::vector<int> dimsOut(pIndices->rankOf()); std::iota(dimsOut.begin(), dimsOut.end(), axis); // fill with axis, axis+1, ... axis+pIndices->rankOf()-1 const Nd4jLong numOfSubArrs = pIndices->lengthOf(); Nd4jLong *outSubArrShapeInfo(nullptr), *inSubArrShapeInfo(nullptr), *outSubArrOffsets(nullptr), *inSubArrOffsets(nullptr); input-> getSubArrShapeAndOffsets({axis}, inSubArrShapeInfo, inSubArrOffsets); output->getSubArrShapeAndOffsets(dimsOut, outSubArrShapeInfo, outSubArrOffsets); if (output->rankOf() > 1) { PointersManager manager(context, "gather"); auto xShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrShapeInfo, shape::shapeInfoByteLength( inSubArrShapeInfo))); auto zShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrShapeInfo, shape::shapeInfoByteLength( outSubArrShapeInfo))); auto xOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrOffsets, (input->lengthOf() / shape::length( inSubArrShapeInfo)) * sizeof(Nd4jLong))); auto zOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrOffsets, (output->lengthOf() / shape::length(outSubArrShapeInfo)) * sizeof(Nd4jLong))); NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLauncher, (context->getCudaStream(), numOfSubArrs, input->getSpecialBuffer(), xShapeInfo, xOffsets, pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->getSpecialBuffer(), zShapeInfo, zOffsets), NUMERIC_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); manager.synchronize(); } else { NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLinear, (context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->specialBuffer(), 
output->specialShapeInfo()), NUMERIC_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); } if(indices == nullptr) delete pIndices; } } BUILD_DOUBLE_TEMPLATE(template void gatherCudaLauncher, (const hipStream_t *stream, const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets), NUMERIC_TYPES, INTEGER_TYPES); BUILD_DOUBLE_TEMPLATE(template void gatherCudaLinear, (const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES, INTEGER_TYPES); } } }
03060617efe181c6df40eedbc17c2178b9a2d2f9.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 07.03.2019 // #include <ops/declarable/helpers/gather.h> #include <numeric> #include <PointersManager.h> #include <ShapeUtils.h> namespace nd4j { namespace ops { namespace helpers { template<typename X, typename Y> __global__ static void gatherCudaLinearKernel(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { __shared__ const X* x; __shared__ const Y* y; __shared__ X* z; __shared__ Nd4jLong xLen, yLen, zLen; if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx); z = reinterpret_cast<X*>(vz); y = reinterpret_cast<const Y *>(vy); xLen = shape::length(xShapeInfo); yLen = shape::length(yShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); //const Nd4jLong zLen = shape::length(zShapeInfo); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int j = start; j < zLen; j += step) { auto zIndex = shape::getIndexOffset(j, zShapeInfo, zLen); auto yIndex = shape::getIndexOffset(j, yShapeInfo, yLen); auto xIndex = shape::getIndexOffset(y[yIndex], xShapeInfo, xLen); //printf("%lld , %lld\n", zIndex, xIndex); z[zIndex] = x[xIndex]; } } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void gatherCuda(const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { const Y* y = reinterpret_cast<const Y*>(vy); __shared__ const X* x; __shared__ X* z; const Nd4jLong len = shape::length(xShapeInfo); //const Nd4jLong zLen = shape::length(zShapeInfo); for (int i = blockIdx.x; i < numOfSubArrs; i += gridDim.x) { if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx) + xOffsets[y[shape::getIndexOffset(i, yShapeInfo, numOfSubArrs)]]; z = reinterpret_cast<X*>(vz) + zOffsets[i]; } __syncthreads(); for (int j = threadIdx.x; j < len; j += blockDim.x) { auto zIndex = shape::getIndexOffset(j, zShapeInfo, len); auto xIndex = shape::getIndexOffset(j, xShapeInfo, len); //printf("%lld , %lld\n", zIndex, xIndex); z[zIndex] = x[xIndex]; } __syncthreads(); } } template<typename X, typename Y> __host__ static void gatherCudaLinear(const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { gatherCudaLinearKernel<X,Y><<<128, 256, 1024, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __host__ static void gatherCudaLauncher(const cudaStream_t *stream, const int numOfSubArrs, const void* 
vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { gatherCuda<X,Y><<<numOfSubArrs, MAX_NUM_THREADS, 1024, *stream>>>(numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, vz, zShapeInfo, zOffsets); } ////////////////////////////////////////////////////////////////////// void gather(nd4j::LaunchContext * context, const NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) { const int inputRank = input->rankOf(); int axis = intArgs.size() > 0 ? intArgs[0] : 0; if(axis < 0) axis += inputRank; const int numOfIntArgs = intArgs.size(); if (indices == nullptr && numOfIntArgs == 2) { // scalar case output->assign((*input)(intArgs[1], {axis})); } else if (indices != nullptr && indices->isScalar()) { if(input->rankOf() <= 1) { //For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar auto idx = indices->e<Nd4jLong>(0); auto scalarNDArray = input->e(idx); output->assign(scalarNDArray); } else { NDArray inSubArr = (*input)(indices->e<Nd4jLong>(0), {axis}); output->assign(inSubArr); } } else { NDArray* pIndices = const_cast<NDArray*>(indices); if(indices == nullptr) pIndices = new NDArray(input->ordering(), {numOfIntArgs-1}, std::vector<double>(intArgs.begin() + 1, intArgs.end()), DataType::INT64, input->getContext()); std::vector<int> dimsOut(pIndices->rankOf()); std::iota(dimsOut.begin(), dimsOut.end(), axis); // fill with axis, axis+1, ... axis+pIndices->rankOf()-1 const Nd4jLong numOfSubArrs = pIndices->lengthOf(); Nd4jLong *outSubArrShapeInfo(nullptr), *inSubArrShapeInfo(nullptr), *outSubArrOffsets(nullptr), *inSubArrOffsets(nullptr); input-> getSubArrShapeAndOffsets({axis}, inSubArrShapeInfo, inSubArrOffsets); output->getSubArrShapeAndOffsets(dimsOut, outSubArrShapeInfo, outSubArrOffsets); if (output->rankOf() > 1) { PointersManager manager(context, "gather"); auto xShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrShapeInfo, shape::shapeInfoByteLength( inSubArrShapeInfo))); auto zShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrShapeInfo, shape::shapeInfoByteLength( outSubArrShapeInfo))); auto xOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrOffsets, (input->lengthOf() / shape::length( inSubArrShapeInfo)) * sizeof(Nd4jLong))); auto zOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrOffsets, (output->lengthOf() / shape::length(outSubArrShapeInfo)) * sizeof(Nd4jLong))); NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLauncher, (context->getCudaStream(), numOfSubArrs, input->getSpecialBuffer(), xShapeInfo, xOffsets, pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->getSpecialBuffer(), zShapeInfo, zOffsets), NUMERIC_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); manager.synchronize(); } else { NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLinear, (context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()), NUMERIC_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); } if(indices == nullptr) 
delete pIndices; } } BUILD_DOUBLE_TEMPLATE(template void gatherCudaLauncher, (const cudaStream_t *stream, const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets), NUMERIC_TYPES, INTEGER_TYPES); BUILD_DOUBLE_TEMPLATE(template void gatherCudaLinear, (const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES, INTEGER_TYPES); } } }
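The gather pair shows the launch translation for a templated kernel that also passes an explicit dynamic-shared-memory size and a stream: kernel<X,Y><<<blocks, threads, shmem, *stream>>>(args...) becomes hipLaunchKernelGGL((kernel<X,Y>), dim3(blocks), dim3(threads), shmem, *stream, args...), where the extra parentheses protect the comma inside the template argument list. A stripped-down illustration follows; the fill kernel and its arguments are invented for this sketch, only the launch syntax mirrors the files above.

#include <cuda_runtime.h>
#include <cstddef>

template <typename T>
__global__ void fill(T* out, T value, size_t n) {
    // Grid-stride loop, in the same style as gatherCudaLinearKernel above.
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        out[i] = value;
}

template <typename T>
void launchFill(cudaStream_t* stream, T* out, T value, size_t n) {
    // CUDA form, as in the .cu file:
    fill<T><<<128, 256, 1024, *stream>>>(out, value, n);
    // HIP form produced by hipify, as in the .hip file:
    // hipLaunchKernelGGL((fill<T>), dim3(128), dim3(256), 1024, *stream, out, value, n);
}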
504a00b841614b3c970dcf514def8abfc51e17f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "d_extract.hpp" #include <hiprand/hiprand.h> #include <stdio.h> #include <math.h> #include <float.h> #include "math_functions.hpp" void d_extract_opt::init(){ init_base(); } void d_extract_opt::reshape(int num, int channel, int height, int width){ if (!reshape_base(num, channel, height, width)) return; pidx_ = 0; mod_ = height_ + width_ + channel_ - 2; stream_ = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); index_ = at::zeros({height_,width_},at::kInt); int * idx = index_.data_ptr<int>(); start_idx_.clear(); int index = 0; for (int ps = 0; ps < height_ + width_ - 1; ps++) { start_idx_.push_back(index); for (int i = 0; i < height_; i++) { int j = ps - i; if (j < 0 || j >= width_) continue; idx[index] = i*width_ + j; index++; } } start_idx_.push_back(index); index_ = index_.to(torch::Device(torch::kCUDA, device_)); } void d_extract_opt::reshape_top(at::TensorOptions option){ std::vector<std::vector<int64_t>> shapes; shapes.push_back({num_,1,height_,width_}); reshape_top_base(option,shapes); } template <typename scalar_t> __global__ void d_extract_forward_kernel(const int num, const scalar_t * const input, const int * index, scalar_t * const output, const int start_idx, const int len_idx, const int height, const int width, const int nchannel, const int psum) { CUDA_KERNEL_LOOP(i, num) { int tl = i % len_idx; int tn = i / len_idx; int thw = index[tl + start_idx]; int tw = thw % width; int th = thw / width; int tc = psum - tw - th; int pidx = (tn*nchannel + tc)*height*width + thw; output[i] = input[pidx]; } } std::vector<at::Tensor> d_extract_opt::forward_cuda(at::Tensor bottom_data) { reshape(bottom_data.size(0), bottom_data.size(1), bottom_data.size(2), bottom_data.size(3)); reshape_top(bottom_data.options()); const int* index = index_.data_ptr<int>(); int psum = pidx_; pidx_ = (pidx_ + 1) % mod_; AT_DISPATCH_FLOATING_TYPES( bottom_data.scalar_type(), "d_extract_forward_cuda", ([&] { const scalar_t * bottom = bottom_data.data_ptr<scalar_t>(); scalar_t * top_data = top_data_[0].data_ptr<scalar_t>(); if (is_label_) { int st = psum - channel_ + 1 < 0 ? 0 : psum - channel_ + 1; int end = psum < height_ + width_ - 2 ? psum + 1 : height_ + width_ - 1; int len_idx = start_idx_[end] - start_idx_[st]; int count = len_idx*num_ * 1; d_extract_forward_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> > (count, bottom, index, top_data, start_idx_[st], len_idx, height_, width_, channel_, psum); } else { if (psum == 0) { caffe_gpu_set(stream_, num_*width_*height_, scalar_t(0), top_data); } else { psum -= 1; int st = psum - channel_ + 1 < 0 ? 0 : psum - channel_ + 1; int end = psum < height_ + width_ - 2 ? psum + 1 : height_ + width_ - 1; int len_idx = start_idx_[end] - start_idx_[st]; int count = len_idx*num_ * 1; d_extract_forward_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> > (count, bottom, index, top_data, start_idx_[st], len_idx, height_, width_, channel_, psum); } } CUDA_POST_KERNEL_CHECK; } ) ); return top_data_; } std::vector<at::Tensor> d_extract_opt::backward_cuda(at::Tensor top_diff) { return {}; }
504a00b841614b3c970dcf514def8abfc51e17f4.cu
#include "d_extract.hpp" #include <curand.h> #include <stdio.h> #include <math.h> #include <float.h> #include "math_functions.hpp" void d_extract_opt::init(){ init_base(); } void d_extract_opt::reshape(int num, int channel, int height, int width){ if (!reshape_base(num, channel, height, width)) return; pidx_ = 0; mod_ = height_ + width_ + channel_ - 2; stream_ = at::cuda::getCurrentCUDAStream(); index_ = at::zeros({height_,width_},at::kInt); int * idx = index_.data_ptr<int>(); start_idx_.clear(); int index = 0; for (int ps = 0; ps < height_ + width_ - 1; ps++) { start_idx_.push_back(index); for (int i = 0; i < height_; i++) { int j = ps - i; if (j < 0 || j >= width_) continue; idx[index] = i*width_ + j; index++; } } start_idx_.push_back(index); index_ = index_.to(torch::Device(torch::kCUDA, device_)); } void d_extract_opt::reshape_top(at::TensorOptions option){ std::vector<std::vector<int64_t>> shapes; shapes.push_back({num_,1,height_,width_}); reshape_top_base(option,shapes); } template <typename scalar_t> __global__ void d_extract_forward_kernel(const int num, const scalar_t * const input, const int * index, scalar_t * const output, const int start_idx, const int len_idx, const int height, const int width, const int nchannel, const int psum) { CUDA_KERNEL_LOOP(i, num) { int tl = i % len_idx; int tn = i / len_idx; int thw = index[tl + start_idx]; int tw = thw % width; int th = thw / width; int tc = psum - tw - th; int pidx = (tn*nchannel + tc)*height*width + thw; output[i] = input[pidx]; } } std::vector<at::Tensor> d_extract_opt::forward_cuda(at::Tensor bottom_data) { reshape(bottom_data.size(0), bottom_data.size(1), bottom_data.size(2), bottom_data.size(3)); reshape_top(bottom_data.options()); const int* index = index_.data_ptr<int>(); int psum = pidx_; pidx_ = (pidx_ + 1) % mod_; AT_DISPATCH_FLOATING_TYPES( bottom_data.scalar_type(), "d_extract_forward_cuda", ([&] { const scalar_t * bottom = bottom_data.data_ptr<scalar_t>(); scalar_t * top_data = top_data_[0].data_ptr<scalar_t>(); if (is_label_) { int st = psum - channel_ + 1 < 0 ? 0 : psum - channel_ + 1; int end = psum < height_ + width_ - 2 ? psum + 1 : height_ + width_ - 1; int len_idx = start_idx_[end] - start_idx_[st]; int count = len_idx*num_ * 1; d_extract_forward_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> > (count, bottom, index, top_data, start_idx_[st], len_idx, height_, width_, channel_, psum); } else { if (psum == 0) { caffe_gpu_set(stream_, num_*width_*height_, scalar_t(0), top_data); } else { psum -= 1; int st = psum - channel_ + 1 < 0 ? 0 : psum - channel_ + 1; int end = psum < height_ + width_ - 2 ? psum + 1 : height_ + width_ - 1; int len_idx = start_idx_[end] - start_idx_[st]; int count = len_idx*num_ * 1; d_extract_forward_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> > (count, bottom, index, top_data, start_idx_[st], len_idx, height_, width_, channel_, psum); } } CUDA_POST_KERNEL_CHECK; } ) ); return top_data_; } std::vector<at::Tensor> d_extract_opt::backward_cuda(at::Tensor top_diff) { return {}; }
145888e46905e0115f3cdac42280feb404bfc1a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define BILLION 1000000000.0F // Pour pouvoir experimenter les performances avec les diffrents types // FMT Permet d'avoir un % adapt pour le printf et donc de pas avoir de warning #define TYPE int #define FMT "d" typedef struct { int x ; int y ; } Point ; void Affiche (char * tabMsg, TYPE * ptBuffer, int NB) { TYPE * pt = ptBuffer ; for ( int k = 0 ; k < NB ; k++ , pt ++) { printf(" - %s[%03d] = %6" FMT, tabMsg, k , *pt) ; if ((k % 5) == (4)) { printf("\n") ; fflush(stdout); } } printf("\n") ; fflush(stdout); } int check(char * msg, int Nb, TYPE * pto) { TYPE * pt1 = pto ; TYPE * pt2 = pto + 1 ; int i ; for (i = 0 ; i < Nb-1 ; i ++) { if (*pt1 > *pt2) { printf("Check %s pour %d - Erreur en position %d %" FMT " > %" FMT " \n", msg, Nb, i, *pt1, *pt2) ; // return i ; exit(25) ; } pt1 ++ ; pt2 ++ ; } printf("Check %s pour %d est OK \n", msg, Nb) ; return 0 ; } __global__ void MergeSmallBatch_k(TYPE *M, int sizeM_tot, TYPE* N, int d) { int i = threadIdx.x%d; int Qt = (threadIdx.x-i)/d; int gbx = Qt + blockIdx.x*(blockDim.x/d); if (threadIdx.x + blockIdx.x*blockDim.x >= sizeM_tot) return; //gerer les dbordements int t = d/2; int sizeA = t; int sizeB = t; M=M+gbx*d; TYPE* A=M; TYPE* B=A+sizeA; Point K, P, Q; int offset ; if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { N[i+gbx*d] = A[Q.y] ; } else { N[i+gbx*d] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } int main(int argc, char ** argv) { //dclaration int N = 10000; hipError_t errCuda; TYPE* ABAB; //[A_0,B_0,A_1,B_1,...] TYPE* MM; // [M_0,M_1,...], les merges respectifs de [A_0,B_0,A_1,B_1,...] 
TYPE* cudaABAB; TYPE* cudaMM; for (int d=4; d<=1024; d=d*2) { float m1; hipEvent_t Start; hipEvent_t Stop; hipEventCreate(&Start) ; hipEventCreate(&Stop) ; int size_total=d*N; //allocation if ((ABAB = (TYPE *) malloc(size_total * sizeof(TYPE))) == NULL) { printf("PB allocation Vecteur Ori\n") ; exit (1) ; } if ((MM = (TYPE *) malloc(size_total * sizeof(TYPE))) == NULL) { printf("PB allocation Vecteur Dest\n") ; exit (1) ; } //initialisation srand(5); for (int i =0; i<size_total; i++) { if (i%(d/2)==0) ABAB[i] = rand()%100; else ABAB[i]=ABAB[i-1]+rand()%100; } // Il faut que tous les A et les B soient tris // Donc ABAB est tri par blocs de taille (d/2) //Allocation if (hipSuccess != (errCuda = hipMalloc((void**)&cudaABAB, size_total * sizeof(TYPE)))) { printf("PB allocation CudaVecteurABAB - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } // cleanup a rajouter pour plus propre if (hipSuccess != (errCuda = hipMalloc((void**)&cudaMM, size_total * sizeof(TYPE)))) { printf("PB allocation CudaVecteurMM - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } // cleanup a rajouter pour plus propre if (hipSuccess != (errCuda = hipMemcpy(cudaABAB, ABAB, size_total * sizeof(TYPE), hipMemcpyHostToDevice))) { printf("PB Copie ABAB -> cudaABAB - %d - %s \n",errCuda,hipGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } hipEventRecord(Start); hipLaunchKernelGGL(( MergeSmallBatch_k), dim3(1024),dim3(1024), 0, 0, cudaABAB,size_total,cudaMM,d); //a revoir hipEventRecord(Stop); if (hipSuccess != hipMemcpy(MM, cudaMM, size_total * sizeof(TYPE), hipMemcpyDeviceToHost)) { printf("PB copie cudaMM -> MM \n") ; fflush(stdout); exit(2) ; } hipEventElapsedTime(&m1, Start, Stop) ; printf("Duree pour d = %4d : %f ms\n",d,m1) ; //free free(MM); free(ABAB); if (cudaABAB != NULL) { hipFree(cudaABAB) ; cudaABAB = NULL ; } if (cudaMM != NULL) { hipFree(cudaMM) ; cudaMM = NULL ; } } return 0 ; }
145888e46905e0115f3cdac42280feb404bfc1a0.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #define BILLION 1000000000.0F // Pour pouvoir experimenter les performances avec les différents types // FMT Permet d'avoir un % adapté pour le printf et donc de pas avoir de warning #define TYPE int #define FMT "d" typedef struct { int x ; int y ; } Point ; void Affiche (char * tabMsg, TYPE * ptBuffer, int NB) { TYPE * pt = ptBuffer ; for ( int k = 0 ; k < NB ; k++ , pt ++) { printf(" - %s[%03d] = %6" FMT, tabMsg, k , *pt) ; if ((k % 5) == (4)) { printf("\n") ; fflush(stdout); } } printf("\n") ; fflush(stdout); } int check(char * msg, int Nb, TYPE * pto) { TYPE * pt1 = pto ; TYPE * pt2 = pto + 1 ; int i ; for (i = 0 ; i < Nb-1 ; i ++) { if (*pt1 > *pt2) { printf("Check %s pour %d - Erreur en position %d %" FMT " > %" FMT " \n", msg, Nb, i, *pt1, *pt2) ; // return i ; exit(25) ; } pt1 ++ ; pt2 ++ ; } printf("Check %s pour %d est OK \n", msg, Nb) ; return 0 ; } __global__ void MergeSmallBatch_k(TYPE *M, int sizeM_tot, TYPE* N, int d) { int i = threadIdx.x%d; int Qt = (threadIdx.x-i)/d; int gbx = Qt + blockIdx.x*(blockDim.x/d); if (threadIdx.x + blockIdx.x*blockDim.x >= sizeM_tot) return; //gerer les débordements int t = d/2; int sizeA = t; int sizeB = t; M=M+gbx*d; TYPE* A=M; TYPE* B=A+sizeA; Point K, P, Q; int offset ; if (i > sizeA) { K.x = i - sizeA ; K.y = sizeA ; P.x = sizeA ; P.y = i - sizeA ; } else // x ~ horizontal { K.x = 0 ; K.y = i ; P.x = i ; P.y = 0 ; } while (1) { offset = abs(K.y - P.y) / 2 ; Q.x = K.x + offset ; Q.y = K.y - offset ; if ( (Q.y >= 0) && (Q.x <= sizeB) && ( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) ) { if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x])) { if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x]))) { N[i+gbx*d] = A[Q.y] ; } else { N[i+gbx*d] = B[Q.x] ; } break ; } else { K.x = Q.x + 1 ; K.y = Q.y - 1 ; } } else { P.x = Q.x -1 ; P.y = Q.y + 1 ; } } } int main(int argc, char ** argv) { //déclaration int N = 10000; cudaError_t errCuda; TYPE* ABAB; //[A_0,B_0,A_1,B_1,...] TYPE* MM; // [M_0,M_1,...], les merges respectifs de [A_0,B_0,A_1,B_1,...] 
TYPE* cudaABAB; TYPE* cudaMM; for (int d=4; d<=1024; d=d*2) { float m1; cudaEvent_t Start; cudaEvent_t Stop; cudaEventCreate(&Start) ; cudaEventCreate(&Stop) ; int size_total=d*N; //allocation if ((ABAB = (TYPE *) malloc(size_total * sizeof(TYPE))) == NULL) { printf("PB allocation Vecteur Ori\n") ; exit (1) ; } if ((MM = (TYPE *) malloc(size_total * sizeof(TYPE))) == NULL) { printf("PB allocation Vecteur Dest\n") ; exit (1) ; } //initialisation srand(5); for (int i =0; i<size_total; i++) { if (i%(d/2)==0) ABAB[i] = rand()%100; else ABAB[i]=ABAB[i-1]+rand()%100; } // Il faut que tous les A et les B soient triés // Donc ABAB est trié par blocs de taille (d/2) //Allocation if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaABAB, size_total * sizeof(TYPE)))) { printf("PB allocation CudaVecteurABAB - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } // cleanup a rajouter pour plus propre if (cudaSuccess != (errCuda = cudaMalloc((void**)&cudaMM, size_total * sizeof(TYPE)))) { printf("PB allocation CudaVecteurMM - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } // cleanup a rajouter pour plus propre if (cudaSuccess != (errCuda = cudaMemcpy(cudaABAB, ABAB, size_total * sizeof(TYPE), cudaMemcpyHostToDevice))) { printf("PB Copie ABAB -> cudaABAB - %d - %s \n",errCuda,cudaGetErrorName(errCuda)) ; fflush(stdout); exit (1) ; } cudaEventRecord(Start); MergeSmallBatch_k<<<1024,1024>>>(cudaABAB,size_total,cudaMM,d); //a revoir cudaEventRecord(Stop); if (cudaSuccess != cudaMemcpy(MM, cudaMM, size_total * sizeof(TYPE), cudaMemcpyDeviceToHost)) { printf("PB copie cudaMM -> MM \n") ; fflush(stdout); exit(2) ; } cudaEventElapsedTime(&m1, Start, Stop) ; printf("Duree pour d = %4d : %f ms\n",d,m1) ; //free free(MM); free(ABAB); if (cudaABAB != NULL) { cudaFree(cudaABAB) ; cudaABAB = NULL ; } if (cudaMM != NULL) { cudaFree(cudaMM) ; cudaMM = NULL ; } } return 0 ; }
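This pair is mostly about event-based timing: cudaEvent_t, cudaEventCreate, cudaEventRecord and cudaEventElapsedTime map one-to-one onto the hipEvent_t calls in the .hip version. A compact timing sketch in the same style is given below (the dummy kernel is a placeholder); unlike the benchmark above, which relies on the blocking memcpy after the launch, it synchronizes on the stop event explicitly before reading the elapsed time.

#include <cuda_runtime.h>

__global__ void dummy() {}

float timeDummyLaunchMs() {
    cudaEvent_t start, stop;                     // HIP: hipEvent_t
    cudaEventCreate(&start);                     // HIP: hipEventCreate
    cudaEventCreate(&stop);
    cudaEventRecord(start);                      // HIP: hipEventRecord
    dummy<<<1, 1>>>();                           // HIP: hipLaunchKernelGGL(dummy, dim3(1), dim3(1), 0, 0)
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                  // HIP: hipEventSynchronize
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);      // HIP: hipEventElapsedTime
    cudaEventDestroy(start);                     // HIP: hipEventDestroy
    cudaEventDestroy(stop);
    return ms;
}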
fd856ce3aa52c5bfd7c99665bf842c525f35ed89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "hip/device_functions.h" #include "device_launch_parameters.h" #include "iostream" #include "stdio.h" #include "cuda_gobal.h" __device__ double *dev_pointl = 0; __device__ double *dev_pointr = 0; __device__ float *dev_p = 0; __constant__ float pA1[9], pA2[9], pAd1[3], pAd2[3], pAD[3]; __global__ void reconKernel(double *lPoints, double *rPoints, float *pCloud, int pointnum) { // float U1, U2, V1, V2; CUDA_KERNEL_LOOP(i, pointnum) { U1 = lPoints[2 * i]; V1 = lPoints[2 * i + 1]; U2 = rPoints[2 * i]; V2 = rPoints[2 * i + 1]; if (U2 > 1280 || U2 < 0 || V2 > 800 || V2 < 0) { pCloud[3 * i] = 0; pCloud[3 * i + 1] = 0; pCloud[3 * i + 2] = 0; //respoint[0]=0; //respoint[1] = 0; //respoint[2] = 0; } else { float res1[3]; float res2[3]; float B1[3]; float B2[3]; float B[6]; float BN[6]; float BxBN[4]; float invBxBN[4]; float invB[6]; float S[2]; float det; float vdet[4]; float Btemp1; float Btemp2; //S //BB for (int var = 0; var <3; ++var) { Btemp1 = pA1[var * 3] * U1 + pA1[var * 3 + 1] * V1 + pA1[var * 3 + 2]; Btemp2 = pA2[var * 3] * U2 + pA2[var * 3 + 1] * V2 + pA2[var * 3 + 2]; B1[var] = Btemp1; B2[var] = Btemp2; B[var * 2] = Btemp1; B[var * 2 + 1] = -Btemp2; BN[var] = Btemp1; BN[var + 3] = -Btemp2; } //BB BxBN[0] = BN[0] * B[0] + BN[1] * B[2] + BN[2] * B[4]; BxBN[1] = BN[0] * B[1] + BN[1] * B[3] + BN[2] * B[5]; BxBN[2] = BN[3] * B[0] + BN[4] * B[2] + BN[5] * B[4]; BxBN[3] = BN[3] * B[1] + BN[4] * B[3] + BN[5] * B[5]; // det = 1 / (BxBN[0] * BxBN[3] - BxBN[1] * BxBN[2]); // invBxBN[0] = BxBN[3] * det; invBxBN[1] = -BxBN[1] * det; invBxBN[2] = -BxBN[2] * det; invBxBN[3] = BxBN[0] * det; //BxBNB for (int var = 0; var < 3; ++var) { invB[var] = invBxBN[0] * BN[var] + invBxBN[1] * BN[var + 3]; invB[var + 3] = invBxBN[2] * BN[var] + invBxBN[3] * BN[var + 3]; } //S S[0] = invB[0] * pAD[0] + invB[1] * pAD[1] + invB[2] * pAD[2]; S[1] = invB[3] * pAD[0] + invB[4] * pAD[1] + invB[5] * pAD[2]; // //float xyz[3]; //pxyz xyzp; for (int var = 0; var < 3; ++var) { res1[var] = B1[var] * S[0] - pAd1[var]; res2[var] = B2[var] * S[1] - pAd2[var]; //respoint[ var] = (res1[var] + res2[var]) / 2; pCloud[3 * i + var] = (res1[var] + res2[var]) / 2; } } //__syncthreads(); //pCloud[3 * i] = respoint[0]; //pCloud[3 * i+1] = respoint[1]; //pCloud[3 * i+2] = respoint[2]; //printf("3:%f ", pCloud[3 * i]); //std::cout << << std::endl; } } STRUCTLIGHTCUDA_API int reconstruct3D_gpu(double* lPoints, double* rPoints, float *pCloud, int pointsNum , float *cpA1, float *cpA2, float *cpAd1, float *cpAd2, float *cpAD) { int result = -1; static int count = 0; hipError_t cudaStatus; // GPU. 
cudaStatus = hipMemcpy(dev_pointr, rPoints, 2 * pointsNum * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { result = 6; goto Error; } if (count == 0) { cudaStatus = hipMemcpy(dev_pointl, lPoints, 2 * pointsNum * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { result = 6; goto Error; } cudaStatus = hipMemcpyToSymbol(pA1, cpA1, 9 * sizeof(float)); if (cudaStatus != hipSuccess) { result = 7; goto Error; } cudaStatus = hipMemcpyToSymbol(pA2, cpA2, 9 * sizeof(float)); if (cudaStatus != hipSuccess) { result = 8; goto Error; } cudaStatus = hipMemcpyToSymbol(pAd1, cpAd1, 3 * sizeof(float)); if (cudaStatus != hipSuccess) { result = 9; goto Error; } cudaStatus = hipMemcpyToSymbol(pAd2, cpAd2, 3 * sizeof(float)); if (cudaStatus != hipSuccess) { result = 10; goto Error; } cudaStatus = hipMemcpyToSymbol(pAD, cpAD, 3 * sizeof(float)); if (cudaStatus != hipSuccess) { result = 11; goto Error; } count = 1; } reconKernel <<< GET_BLOCKS(pointsNum), CUDA_NUM_THREADS >> >(dev_pointl, dev_pointr, dev_p, pointsNum); // cudaDeviceSynchronizeGPU //cudaStatus = hipDeviceSynchronize(); //if (cudaStatus != hipSuccess) { // result = 7; // goto Error; //} // GPU cudaStatus = hipMemcpy(pCloud, dev_p, 3 * pointsNum * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { result = 12; goto Error; } result = 0; Error: // return result; } STRUCTLIGHTCUDA_API int initrecmemorygpu(int size) { hipError_t cudaStatus; int result = 0; static int inited = 0; if (inited == 1) { return 1; } // GPU //if (opengpu()<0) { // printf("cannot open cuda"); // result = 1; //} // GPUdev_adev_bdev_c. cudaStatus = hipMalloc((void**)&dev_p, 3 * size * sizeof(float)); if (cudaStatus != hipSuccess) { result = 2; } cudaStatus = hipMalloc((void**)&dev_pointr, 2 * size * sizeof(double)); if (cudaStatus != hipSuccess) { result = 4; } cudaStatus = hipMalloc((void**)&dev_pointl, 2 * size * sizeof(double)); if (cudaStatus != hipSuccess) { result = 4; } inited = 1; return result; } STRUCTLIGHTCUDA_API int freerecmemorygpu() { static int freed = 0; if (freed == 1) { return 1; } if (dev_p != 0 || dev_pointr != 0 || dev_pointl != 0) { hipFree(dev_p); hipFree(dev_pointl); hipFree(dev_pointr); } freed = 1; return 1; }
fd856ce3aa52c5bfd7c99665bf842c525f35ed89.cu
#include "cuda_runtime.h" #include "cuda.h" #include "device_functions.h" #include "device_launch_parameters.h" #include "iostream" #include "stdio.h" #include "cuda_gobal.h" __device__ double *dev_pointl = 0; __device__ double *dev_pointr = 0; __device__ float *dev_p = 0; __constant__ float pA1[9], pA2[9], pAd1[3], pAd2[3], pAD[3]; __global__ void reconKernel(double *lPoints, double *rPoints, float *pCloud, int pointnum) { // float U1, U2, V1, V2; CUDA_KERNEL_LOOP(i, pointnum) { U1 = lPoints[2 * i]; V1 = lPoints[2 * i + 1]; U2 = rPoints[2 * i]; V2 = rPoints[2 * i + 1]; if (U2 > 1280 || U2 < 0 || V2 > 800 || V2 < 0) { pCloud[3 * i] = 0; pCloud[3 * i + 1] = 0; pCloud[3 * i + 2] = 0; //respoint[0]=0; //respoint[1] = 0; //respoint[2] = 0; } else { float res1[3]; float res2[3]; float B1[3]; float B2[3]; float B[6]; float BN[6]; float BxBN[4]; float invBxBN[4]; float invB[6]; float S[2]; float det; float vdet[4]; float Btemp1; float Btemp2; //使用先求出放大倍数S的方法,只需要对二维矩阵求逆 //求B与B的转置 for (int var = 0; var <3; ++var) { Btemp1 = pA1[var * 3] * U1 + pA1[var * 3 + 1] * V1 + pA1[var * 3 + 2]; Btemp2 = pA2[var * 3] * U2 + pA2[var * 3 + 1] * V2 + pA2[var * 3 + 2]; B1[var] = Btemp1; B2[var] = Btemp2; B[var * 2] = Btemp1; B[var * 2 + 1] = -Btemp2; BN[var] = Btemp1; BN[var + 3] = -Btemp2; } //B的转置与B相乘 BxBN[0] = BN[0] * B[0] + BN[1] * B[2] + BN[2] * B[4]; BxBN[1] = BN[0] * B[1] + BN[1] * B[3] + BN[2] * B[5]; BxBN[2] = BN[3] * B[0] + BN[4] * B[2] + BN[5] * B[4]; BxBN[3] = BN[3] * B[1] + BN[4] * B[3] + BN[5] * B[5]; //计算行列式 det = 1 / (BxBN[0] * BxBN[3] - BxBN[1] * BxBN[2]); //二维矩阵求逆 invBxBN[0] = BxBN[3] * det; invBxBN[1] = -BxBN[1] * det; invBxBN[2] = -BxBN[2] * det; invBxBN[3] = BxBN[0] * det; //BxBN逆矩阵与B相乘 for (int var = 0; var < 3; ++var) { invB[var] = invBxBN[0] * BN[var] + invBxBN[1] * BN[var + 3]; invB[var + 3] = invBxBN[2] * BN[var] + invBxBN[3] * BN[var + 3]; } //求出两个放大系数S S[0] = invB[0] * pAD[0] + invB[1] * pAD[1] + invB[2] * pAD[2]; S[1] = invB[3] * pAD[0] + invB[4] * pAD[1] + invB[5] * pAD[2]; //分别求出坐标,取平均 //float xyz[3]; //pxyz xyzp; for (int var = 0; var < 3; ++var) { res1[var] = B1[var] * S[0] - pAd1[var]; res2[var] = B2[var] * S[1] - pAd2[var]; //respoint[ var] = (res1[var] + res2[var]) / 2; pCloud[3 * i + var] = (res1[var] + res2[var]) / 2; } } //__syncthreads(); //pCloud[3 * i] = respoint[0]; //pCloud[3 * i+1] = respoint[1]; //pCloud[3 * i+2] = respoint[2]; //printf("3:%f ", pCloud[3 * i]); //std::cout << << std::endl; } } STRUCTLIGHTCUDA_API int reconstruct3D_gpu(double* lPoints, double* rPoints, float *pCloud, int pointsNum , float *cpA1, float *cpA2, float *cpAd1, float *cpAd2, float *cpAD) { int result = -1; static int count = 0; cudaError_t cudaStatus; // 从主机内存复制数据到GPU内存中. 
cudaStatus = cudaMemcpy(dev_pointr, rPoints, 2 * pointsNum * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { result = 6; goto Error; } if (count == 0) { cudaStatus = cudaMemcpy(dev_pointl, lPoints, 2 * pointsNum * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { result = 6; goto Error; } cudaStatus = cudaMemcpyToSymbol(pA1, cpA1, 9 * sizeof(float)); if (cudaStatus != cudaSuccess) { result = 7; goto Error; } cudaStatus = cudaMemcpyToSymbol(pA2, cpA2, 9 * sizeof(float)); if (cudaStatus != cudaSuccess) { result = 8; goto Error; } cudaStatus = cudaMemcpyToSymbol(pAd1, cpAd1, 3 * sizeof(float)); if (cudaStatus != cudaSuccess) { result = 9; goto Error; } cudaStatus = cudaMemcpyToSymbol(pAd2, cpAd2, 3 * sizeof(float)); if (cudaStatus != cudaSuccess) { result = 10; goto Error; } cudaStatus = cudaMemcpyToSymbol(pAD, cpAD, 3 * sizeof(float)); if (cudaStatus != cudaSuccess) { result = 11; goto Error; } count = 1; } reconKernel <<< GET_BLOCKS(pointsNum), CUDA_NUM_THREADS >> >(dev_pointl, dev_pointr, dev_p, pointsNum); // 采用cudaDeviceSynchronize等待GPU内核函数执行完成并且返回遇到的任何错误信息 //cudaStatus = cudaDeviceSynchronize(); //if (cudaStatus != cudaSuccess) { // result = 7; // goto Error; //} // 从GPU内存中复制数据到主机内存中 cudaStatus = cudaMemcpy(pCloud, dev_p, 3 * pointsNum * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { result = 12; goto Error; } result = 0; Error: //释放设备中变量所占内存 return result; } STRUCTLIGHTCUDA_API int initrecmemorygpu(int size) { cudaError_t cudaStatus; int result = 0; static int inited = 0; if (inited == 1) { return 1; } // 选择用于运行的GPU //if (opengpu()<0) { // printf("cannot open cuda"); // result = 1; //} // 在GPU中为变量dev_a、dev_b、dev_c分配内存空间. cudaStatus = cudaMalloc((void**)&dev_p, 3 * size * sizeof(float)); if (cudaStatus != cudaSuccess) { result = 2; } cudaStatus = cudaMalloc((void**)&dev_pointr, 2 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { result = 4; } cudaStatus = cudaMalloc((void**)&dev_pointl, 2 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { result = 4; } inited = 1; return result; } STRUCTLIGHTCUDA_API int freerecmemorygpu() { static int freed = 0; if (freed == 1) { return 1; } if (dev_p != 0 || dev_pointr != 0 || dev_pointl != 0) { cudaFree(dev_p); cudaFree(dev_pointl); cudaFree(dev_pointr); } freed = 1; return 1; }
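Both versions of this reconstruction file rely on CUDA_KERNEL_LOOP, GET_BLOCKS and CUDA_NUM_THREADS from "cuda_gobal.h", which is not part of this dump (STRUCTLIGHTCUDA_API is presumably the usual DLL export macro). The definitions below are the conventional Caffe-style ones such a header typically provides; they are an assumption for readability, and the project's real header may differ:

// Assumed contents of "cuda_gobal.h" (Caffe-style conventions); the header
// actually shipped with this project may define these differently.
#define CUDA_NUM_THREADS 512

// Number of blocks needed to cover n elements with CUDA_NUM_THREADS per block.
#define GET_BLOCKS(n) (((n) + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS)

// Grid-stride loop: each thread processes indices tid, tid + stride, ... < n.
#define CUDA_KERNEL_LOOP(i, n)                                   \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
         i += blockDim.x * gridDim.x)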
33ad187661d2fc2a5b08deb18ad7ccaf507cb982.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- // #include "ATen/hip/HIPContext.h" #include "open3d/core/nns/FixedRadiusSearchImpl.cuh" #include "open3d/ml/pytorch/TorchHelper.h" #include "torch/script.h" template <class T> void BuildSpatialHashTableCUDA(const torch::Tensor& points, double radius, const torch::Tensor& points_row_splits, const std::vector<uint32_t>& hash_table_splits, torch::Tensor& hash_table_index, torch::Tensor& hash_table_cell_splits) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); const int texture_alignment = cuda_device_props->textureAlignment; void* temp_ptr = nullptr; size_t temp_size = 0; // determine temp_size open3d::core::nns::impl::BuildSpatialHashTableCUDA( stream, temp_ptr, temp_size, texture_alignment, points.size(0), points.data_ptr<T>(), T(radius), points_row_splits.size(0), points_row_splits.data_ptr<int64_t>(), hash_table_splits.data(), hash_table_cell_splits.size(0), (uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(), (uint32_t*)hash_table_index.data_ptr<int32_t>()); auto device = points.device(); auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr); // actually build the table open3d::core::nns::impl::BuildSpatialHashTableCUDA( stream, temp_ptr, temp_size, texture_alignment, points.size(0), points.data_ptr<T>(), T(radius), points_row_splits.size(0), points_row_splits.data_ptr<int64_t>(), hash_table_splits.data(), hash_table_cell_splits.size(0), (uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(), (uint32_t*)hash_table_index.data_ptr<int32_t>()); } #define INSTANTIATE(T) \ template void BuildSpatialHashTableCUDA<T>( \ const torch::Tensor&, double, const torch::Tensor&, \ const std::vector<uint32_t>&, torch::Tensor&, torch::Tensor&); INSTANTIATE(float)
33ad187661d2fc2a5b08deb18ad7ccaf507cb982.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- // #include "ATen/cuda/CUDAContext.h" #include "open3d/core/nns/FixedRadiusSearchImpl.cuh" #include "open3d/ml/pytorch/TorchHelper.h" #include "torch/script.h" template <class T> void BuildSpatialHashTableCUDA(const torch::Tensor& points, double radius, const torch::Tensor& points_row_splits, const std::vector<uint32_t>& hash_table_splits, torch::Tensor& hash_table_index, torch::Tensor& hash_table_cell_splits) { auto stream = at::cuda::getCurrentCUDAStream(); auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); const int texture_alignment = cuda_device_props->textureAlignment; void* temp_ptr = nullptr; size_t temp_size = 0; // determine temp_size open3d::core::nns::impl::BuildSpatialHashTableCUDA( stream, temp_ptr, temp_size, texture_alignment, points.size(0), points.data_ptr<T>(), T(radius), points_row_splits.size(0), points_row_splits.data_ptr<int64_t>(), hash_table_splits.data(), hash_table_cell_splits.size(0), (uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(), (uint32_t*)hash_table_index.data_ptr<int32_t>()); auto device = points.device(); auto temp_tensor = CreateTempTensor(temp_size, device, &temp_ptr); // actually build the table open3d::core::nns::impl::BuildSpatialHashTableCUDA( stream, temp_ptr, temp_size, texture_alignment, points.size(0), points.data_ptr<T>(), T(radius), points_row_splits.size(0), points_row_splits.data_ptr<int64_t>(), hash_table_splits.data(), hash_table_cell_splits.size(0), (uint32_t*)hash_table_cell_splits.data_ptr<int32_t>(), (uint32_t*)hash_table_index.data_ptr<int32_t>()); } #define INSTANTIATE(T) \ template void BuildSpatialHashTableCUDA<T>( \ const torch::Tensor&, double, const torch::Tensor&, \ const std::vector<uint32_t>&, torch::Tensor&, torch::Tensor&); INSTANTIATE(float)
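BuildSpatialHashTableCUDA is deliberately called twice: the first call passes temp_ptr == nullptr so the implementation only writes the required temp_size, then CreateTempTensor allocates a torch tensor of that size and the second call does the real work. This "size query, then run" protocol is the same one CUB's device-wide algorithms use; for reference, a self-contained example with CUB's exclusive scan (not part of the Open3D code) looks like this:

// Standalone illustration of the same two-pass temporary-storage convention
// using CUB's exclusive scan; shown only because it follows the identical protocol.
#include <cub/cub.cuh>

void exclusive_scan_example(const int* d_in, int* d_out, int num_items)
{
    void*  d_temp = nullptr;
    size_t temp_bytes = 0;
    // Pass 1: d_temp == nullptr, so CUB only reports the required temp size.
    cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
    cudaMalloc(&d_temp, temp_bytes);
    // Pass 2: the same call with real temporary storage does the actual work.
    cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, num_items);
    cudaFree(d_temp);
}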
5dad1c34dc03699917085b3c43880fb67956f102.hip
// !!! This is a file automatically generated by hipify!!! /* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_multiplyV(Sparse/Dense(X),Sparse/Dense(Y), alpha). * Z= alpha*X*Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include "CuMatlab_sparseSSR.cu" #include "CuMatlab_sparseSSC.cu" #include "CuMatlab_sparseDDR.cu" #include "CuMatlab_sparseDDC.cu" #include "CuMatlab_sparseSDR.cu" #include "CuMatlab_sparseSDC.cu" #include "CuMatlab_sparseDSR.cu" #include "CuMatlab_sparseDSC.cu" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> extern "C" static void mexCuMatlab_sparseSSR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseSSC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseSDR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } if (nrhs==3 && nlhs==1) { if (mxIsGPUArray(prhs[0]) && mxIsGPUArray(prhs[1])) { mxGPUArray const *tempGPU1; tempGPU1 = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const *tempGPU2; tempGPU2 = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const *tempGPU3; tempGPU3 = mxGPUCreateFromMxArray(prhs[2]); if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxREAL) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxREAL) ){ if ( mxGPUGetComplexity(tempGPU3) != mxREAL ) { mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. 
%s\n"); return; } if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseSSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseDDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseSDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseDSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } } else if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxCOMPLEX) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxCOMPLEX) ){ if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX ){ mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n"); return; } if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseSSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseDDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseSDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseDSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } } else{ mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else if(!mxIsGPUArray(prhs[0]) && !mxIsGPUArray(prhs[1])) { mxGPUArray const *tempGPU3; tempGPU3 = mxGPUCreateFromMxArray(prhs[2]); if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[1]))){ if ( mxGPUGetComplexity(tempGPU3) != mxREAL ) { mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. 
%s\n"); return; } if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) { mexCuMatlab_sparseSSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) { mexCuMatlab_sparseDDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) { mexCuMatlab_sparseSDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) { mexCuMatlab_sparseDSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } } else if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[1]))){ if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX ){ mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n"); return; } if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){ mexCuMatlab_sparseSSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){ mexCuMatlab_sparseDDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){ mexCuMatlab_sparseSDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){ mexCuMatlab_sparseDSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } } else if ((nrhs>3) || (nrhs<3) || (nlhs<1) || (nlhs>1)){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input/output arguments! input arguments must be three and output argument must be one \n"); return; } }
5dad1c34dc03699917085b3c43880fb67956f102.cu
/* * This CUDA-Cusparse code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0]/prhs[1] := mxGPUArray or CPU Array}[double/complex double] * Sparse/Dense matrix-sparse/dense vector multiplication Z=CuMatlab_multiplyV(Sparse/Dense(X),Sparse/Dense(Y), alpha). * Z= alpha*X*Y * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cusparse_v2.h> #include "CuMatlab_sparseSSR.cu" #include "CuMatlab_sparseSSC.cu" #include "CuMatlab_sparseDDR.cu" #include "CuMatlab_sparseDDC.cu" #include "CuMatlab_sparseSDR.cu" #include "CuMatlab_sparseSDC.cu" #include "CuMatlab_sparseDSR.cu" #include "CuMatlab_sparseDSC.cu" #include <cuda.h> #include <cuda_runtime.h> extern "C" static void mexCuMatlab_sparseSSR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseSSC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDDR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseSDR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseSDC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDSR(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); extern "C" static void mexCuMatlab_sparseDSC(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } if (nrhs==3 && nlhs==1) { if (mxIsGPUArray(prhs[0]) && mxIsGPUArray(prhs[1])) { mxGPUArray const *tempGPU1; tempGPU1 = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const *tempGPU2; tempGPU2 = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const *tempGPU3; tempGPU3 = mxGPUCreateFromMxArray(prhs[2]); if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxREAL) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxREAL) ){ if ( mxGPUGetComplexity(tempGPU3) != mxREAL ) { mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. 
%s\n"); return; } if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseSSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseDDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseSDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))) { mexCuMatlab_sparseDSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } } else if ((mxGPUGetClassID(tempGPU1) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU1) == mxCOMPLEX) && (mxGPUGetClassID(tempGPU2) == mxDOUBLE_CLASS) && (mxGPUGetComplexity(tempGPU2) == mxCOMPLEX) ){ if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX ){ mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n"); return; } if ( (mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseSSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseDDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxGPUIsSparse(tempGPU1))&& (!mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseSDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxGPUIsSparse(tempGPU1))&& (mxGPUIsSparse(tempGPU2))){ mexCuMatlab_sparseDSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); return; } } else{ mxGPUDestroyGPUArray(tempGPU1); mxGPUDestroyGPUArray(tempGPU2); mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } // else if(!mxIsGPUArray(prhs[0]) && !mxIsGPUArray(prhs[1])) { mxGPUArray const *tempGPU3; tempGPU3 = mxGPUCreateFromMxArray(prhs[2]); if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (!mxIsComplex(prhs[1]))){ if ( mxGPUGetComplexity(tempGPU3) != mxREAL ) { mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be scalar and double precision. 
%s\n"); return; } if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) { mexCuMatlab_sparseSSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) { mexCuMatlab_sparseDDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))) { mexCuMatlab_sparseSDR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))) { mexCuMatlab_sparseDSR(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } } else if ((mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[0])) && (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) && (mxIsComplex(prhs[1]))){ if ( mxGPUGetComplexity(tempGPU3) != mxCOMPLEX ){ mxGPUDestroyGPUArray(tempGPU3); mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments!, input(THIRD AND FOURTH ARGUMENTS) must be complex and double precision. %s\n"); return; } if ( (mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){ mexCuMatlab_sparseSSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){ mexCuMatlab_sparseDDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (mxIsSparse(prhs[0]))&& (!mxIsSparse(prhs[1]))){ mexCuMatlab_sparseSDC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } if ( (!mxIsSparse(prhs[0]))&& (mxIsSparse(prhs[1]))){ mexCuMatlab_sparseDSC(nlhs, plhs, nrhs, prhs); mxGPUDestroyGPUArray(tempGPU3); return; } } else{ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input arguments! %s\n"); } } } else if ((nrhs>3) || (nrhs<3) || (nlhs<1) || (nlhs>1)){ mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput", "Incorrect input/output arguments! input arguments must be three and output argument must be one \n"); return; } }
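The dispatcher above picks one of eight helpers whose three-letter suffix encodes the operand kinds: the first letter is S (sparse) or D (dense) for X, the second for Y, and the last is R (real) or C (complex). A small helper that computes the suffix, shown only to make the naming convention explicit and not part of the original MEX file, would be:

#include <string>

// Sketch: map (sparse X, sparse Y, complex data) to the helper-name suffix
// used above, e.g. "SDR" -> mexCuMatlab_sparseSDR. Illustrative only.
static std::string helperSuffix(bool xSparse, bool ySparse, bool isComplex)
{
    std::string s;
    s += xSparse ? 'S' : 'D';   // first operand: Sparse or Dense
    s += ySparse ? 'S' : 'D';   // second operand: Sparse or Dense
    s += isComplex ? 'C' : 'R'; // element type: Complex or Real
    return s;                   // e.g. helperSuffix(true, false, false) == "SDR"
}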
06aa49ba0b9f5f4d06c648d0011aaf1a5627d044.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "hip/hip_runtime.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { hipError_t error = hipGetLastError (); if (error != hipSuccess) { printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3) { for (int i=2; i<=N-3; i++) { double _t_12_; double _t_13_; double _t_10_; double _t_19_; double _t_17_; double _t_20_; double _t_24_; double _t_22_; double _t_25_; double _t_32_; double _t_30_; double _t_33_; double _t_37_; double _t_35_; double _t_38_; double _t_44_; double _t_42_; double _t_45_; double _t_49_; double _t_47_; double _t_50_; double _t_7_; double _t_5_; double _t_8_; double _t_23_; double _t_153_; double _t_152_; double _t_21_; double _t_1_; double _t_150_; double _t_18_; double _t_16_; double _t_15_; double _t_158_; double _t_156_; double _t_14_; double _t_48_; double _t_192_; double _t_191_; double _t_46_; double _t_26_; double _t_189_; double _t_43_; double _t_41_; double _t_40_; double _t_39_; double _t_197_; double _t_195_; double _t_36_; double _t_173_; double _t_172_; double _t_34_; double _t_170_; double _t_31_; double _t_29_; double _t_28_; double _t_178_; double _t_176_; double _t_27_; double _t_0_; double _t_11_; double _t_134_; double _t_133_; double _t_9_; double _t_131_; double _t_6_; double _t_4_; double _t_3_; double _t_139_; double _t_137_; double _t_2_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_145_; double _t_143_; double _t_164_; double _t_162_; double _t_184_; double _t_182_; double _t_203_; double _t_201_; double _t_135_; double _t_77_; double _t_132_; double _t_59_; double _t_154_; double _t_151_; double _t_56_; double _t_74_; double _t_136_; double _t_114_; double _t_130_; double _t_129_; double _t_128_; double _t_96_; double _t_155_; double _t_149_; double _t_148_; double _t_111_; double _t_93_; double _t_140_; double _t_83_; double _t_138_; double _t_65_; double _t_159_; double _t_157_; double _t_63_; double _t_81_; double _t_141_; double _t_120_; double _t_102_; double _t_160_; double _t_100_; double _t_118_; double _t_146_; double _t_88_; double _t_144_; double 
_t_70_; double _t_165_; double _t_163_; double _t_68_; double _t_86_; double _t_147_; double _t_125_; double _t_142_; double _t_107_; double _t_166_; double _t_161_; double _t_127_; double _t_105_; double _t_123_; double _t_174_; double _t_78_; double _t_171_; double _t_60_; double _t_193_; double _t_190_; double _t_175_; double _t_115_; double _t_169_; double _t_97_; double _t_168_; double _t_167_; double _t_194_; double _t_188_; double _t_187_; double _t_179_; double _t_84_; double _t_177_; double _t_66_; double _t_198_; double _t_196_; double _t_180_; double _t_121_; double _t_103_; double _t_199_; double _t_185_; double _t_89_; double _t_183_; double _t_71_; double _t_204_; double _t_202_; double _t_186_; double _t_126_; double _t_181_; double _t_108_; double _t_205_; double _t_200_; double _t_113_; double _t_112_; double _t_58_; double _t_57_; double _t_76_; double _t_75_; double _t_95_; double _t_94_; double _t_110_; double _t_109_; double _t_90_; double _t_119_; double _t_117_; double _t_116_; double _t_124_; double _t_122_; double _t_55_; double _t_54_; double _t_53_; double _t_64_; double _t_62_; double _t_61_; double _t_69_; double _t_67_; double _t_73_; double _t_72_; double _t_82_; double _t_80_; double _t_79_; double _t_87_; double _t_85_; double _t_52_; double _t_92_; double _t_91_; double _t_101_; double _t_99_; double _t_98_; double _t_106_; double _t_104_; double _t_51_; _t_12_ = -u2[i][j-2][k+2]; _t_12_ += u2[i][j+2][k+2]; _t_13_ = -u2[i][j-1][k+2]; _t_13_ += u2[i][j+1][k+2]; _t_10_ = c1 * _t_13_; _t_10_ += c2 * _t_12_; _t_19_ = -u1[i][j-2][k-2]; _t_19_ += u1[i][j+2][k-2]; _t_17_ = c2 * _t_19_; _t_20_ = -u1[i][j-1][k-2]; _t_20_ += u1[i][j+1][k-2]; _t_17_ += c1 * _t_20_; _t_24_ = -u2[i][j-2][k-2]; _t_24_ += u2[i][j+2][k-2]; _t_22_ = c2 * _t_24_; _t_25_ = -u2[i][j-1][k-2]; _t_25_ += u2[i][j+1][k-2]; _t_22_ += c1 * _t_25_; _t_32_ = -u1[i][j-2][k+1]; _t_32_ += u1[i][j+2][k+1]; _t_30_ = c2 * _t_32_; _t_33_ = -u1[i][j-1][k+1]; _t_33_ += u1[i][j+1][k+1]; _t_30_ += c1 * _t_33_; _t_37_ = -u2[i][j-2][k+1]; _t_37_ += u2[i][j+2][k+1]; _t_35_ = c2 * _t_37_; _t_38_ = -u2[i][j-1][k+1]; _t_38_ += u2[i][j+1][k+1]; _t_35_ += c1 * _t_38_; _t_44_ = -u1[i][j-2][k-1]; _t_44_ += u1[i][j+2][k-1]; _t_42_ = c2 * _t_44_; _t_45_ = -u1[i][j-1][k-1]; _t_45_ += u1[i][j+1][k-1]; _t_42_ += c1 * _t_45_; _t_49_ = -u2[i][j-2][k-1]; _t_49_ += u2[i][j+2][k-1]; _t_47_ = c2 * _t_49_; _t_50_ = -u2[i][j-1][k-1]; _t_50_ += u2[i][j+1][k-1]; _t_47_ += c1 * _t_50_; _t_7_ = -u1[i][j-2][k+2]; _t_7_ += u1[i][j+2][k+2]; _t_5_ = c2 * _t_7_; _t_8_ = -u1[i][j-1][k+2]; _t_8_ += u1[i][j+1][k+2]; _t_5_ += c1 * _t_8_; _t_23_ = la[i][j][k-2] * met2[i][j][k-2]; _t_153_ = 2.0 * mu[i][j][k-2]; _t_153_ += la[i][j][k-2]; _t_152_ = _t_153_ * met2[i][j][k-2]; _t_21_ = _t_23_ * met1[i][j][k-2]; _t_1_ = _t_21_ * _t_22_; _t_150_ = _t_152_ * met1[i][j][k-2]; _t_18_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_16_ = _t_18_ * met1[i][j][k-2]; _t_15_ = _t_16_ * _t_17_; _t_158_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_156_ = _t_158_ * met1[i][j][k-2]; _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; _t_48_ = la[i][j][k-1] * met2[i][j][k-1]; _t_192_ = 2.0 * mu[i][j][k-1]; _t_192_ += la[i][j][k-1]; _t_191_ = _t_192_ * met2[i][j][k-1]; _t_46_ = _t_48_ * met1[i][j][k-1]; _t_26_ = _t_46_ * _t_47_; _t_189_ = _t_191_ * met1[i][j][k-1]; _t_43_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_41_ = _t_43_ * met1[i][j][k-1]; _t_40_ = _t_41_ * _t_42_; _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; _t_197_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_195_ = _t_197_ * 
met1[i][j][k-1]; _t_36_ = la[i][j][k+1] * met2[i][j][k+1]; _t_173_ = 2.0 * mu[i][j][k+1]; _t_173_ += la[i][j][k+1]; _t_172_ = _t_173_ * met2[i][j][k+1]; _t_34_ = _t_36_ * met1[i][j][k+1]; _t_26_ += _t_34_ * _t_35_; _t_170_ = _t_172_ * met1[i][j][k+1]; _t_31_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_29_ = _t_31_ * met1[i][j][k+1]; _t_28_ = _t_29_ * _t_30_; _t_178_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_176_ = _t_178_ * met1[i][j][k+1]; _t_27_ = _t_28_ * stry[j-2]; _t_26_ += _t_27_ * strx[i]; _t_0_ = c1 * _t_26_; _t_11_ = la[i][j][k+2] * met2[i][j][k+2]; _t_134_ = 2.0 * mu[i][j][k+2]; _t_134_ += la[i][j][k+2]; _t_133_ = _t_134_ * met2[i][j][k+2]; _t_9_ = _t_11_ * met1[i][j][k+2]; _t_1_ += _t_9_ * _t_10_; _t_131_ = _t_133_ * met1[i][j][k+2]; _t_6_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_4_ = _t_6_ * met1[i][j][k+2]; _t_3_ = _t_4_ * _t_5_; _t_139_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_137_ = _t_139_ * met1[i][j][k+2]; _t_2_ = _t_3_ * stry[j+2]; _t_1_ += _t_2_ * strx[i]; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_145_ = mu[i][j][k+2] * met4[i][j][k+2]; _t_143_ = _t_145_ * met1[i][j][k+2]; _t_164_ = mu[i][j][k-2] * met4[i][j][k-2]; _t_162_ = _t_164_ * met1[i][j][k-2]; _t_184_ = mu[i][j][k+1] * met4[i][j][k+1]; _t_182_ = _t_184_ * met1[i][j][k+1]; _t_203_ = mu[i][j][k-1] * met4[i][j][k-1]; _t_201_ = _t_203_ * met1[i][j][k-1]; _t_135_ = -u1[i-2][j][k+2]; _t_77_ = u1[i-2][j][k+2]; _t_135_ += u1[i+2][j][k+2]; _t_132_ = c2 * _t_135_; _t_59_ = u1[i+2][j][k+2]; _t_77_ -= u1[i-2][j][k-2]; _t_154_ = -u1[i-2][j][k-2]; _t_59_ -= u1[i+2][j][k-2]; _t_154_ += u1[i+2][j][k-2]; _t_151_ = c2 * _t_154_; _t_56_ = c2 * _t_59_; _t_74_ = c2 * _t_77_; _t_136_ = -u1[i-1][j][k+2]; _t_114_ = u1[i-1][j][k+2]; _t_136_ += u1[i+1][j][k+2]; _t_132_ += c1 * _t_136_; _t_130_ = _t_131_ * _t_132_; _t_129_ = _t_130_ * strx[i]; _t_128_ = _t_129_ * stry[j]; _t_96_ = u1[i+1][j][k+2]; _t_114_ -= u1[i-1][j][k-2]; _t_155_ = -u1[i-1][j][k-2]; _t_96_ -= u1[i+1][j][k-2]; _t_155_ += u1[i+1][j][k-2]; _t_151_ += c1 * _t_155_; _t_149_ = _t_150_ * _t_151_; _t_148_ = _t_149_ * strx[i]; _t_128_ += _t_148_ * stry[j]; _t_111_ = c2 * _t_114_; _t_93_ = c2 * _t_96_; _t_140_ = -u2[i-2][j][k+2]; _t_83_ = u2[i-2][j][k+2]; _t_140_ += u2[i+2][j][k+2]; _t_138_ = c2 * _t_140_; _t_65_ = u2[i+2][j][k+2]; _t_83_ -= u2[i-2][j][k-2]; _t_159_ = -u2[i-2][j][k-2]; _t_65_ -= u2[i+2][j][k-2]; _t_159_ += u2[i+2][j][k-2]; _t_157_ = c2 * _t_159_; _t_63_ = c2 * _t_65_; _t_81_ = c2 * _t_83_; _t_141_ = -u2[i-1][j][k+2]; _t_120_ = u2[i-1][j][k+2]; _t_141_ += u2[i+1][j][k+2]; _t_138_ += c1 * _t_141_; _t_128_ += _t_137_ * _t_138_; _t_102_ = u2[i+1][j][k+2]; _t_120_ -= u2[i-1][j][k-2]; _t_160_ = -u2[i-1][j][k-2]; _t_102_ -= u2[i+1][j][k-2]; _t_160_ += u2[i+1][j][k-2]; _t_157_ += c1 * _t_160_; _t_128_ += _t_156_ * _t_157_; _t_100_ = c2 * _t_102_; _t_118_ = c2 * _t_120_; _t_146_ = -u3[i-2][j][k+2]; _t_88_ = u3[i-2][j][k+2]; _t_146_ += u3[i+2][j][k+2]; _t_144_ = c2 * _t_146_; _t_70_ = u3[i+2][j][k+2]; _t_88_ -= u3[i-2][j][k-2]; _t_165_ = -u3[i-2][j][k-2]; _t_70_ -= u3[i+2][j][k-2]; _t_165_ += u3[i+2][j][k-2]; _t_163_ = c2 * _t_165_; _t_68_ = c2 * _t_70_; _t_86_ = c2 * _t_88_; _t_147_ = -u3[i-1][j][k+2]; _t_125_ = u3[i-1][j][k+2]; _t_147_ += u3[i+1][j][k+2]; _t_144_ += c1 * _t_147_; _t_142_ = _t_143_ * _t_144_; _t_128_ += _t_142_ * stry[j]; _t_107_ = u3[i+1][j][k+2]; _t_125_ -= u3[i-1][j][k-2]; _t_166_ = -u3[i-1][j][k-2]; _t_107_ -= u3[i+1][j][k-2]; _t_166_ += u3[i+1][j][k-2]; _t_163_ += c1 * _t_166_; _t_161_ = _t_162_ * _t_163_; _t_128_ += _t_161_ * stry[j]; _t_127_ = c2 * _t_128_; 
_t_105_ = c2 * _t_107_; _t_123_ = c2 * _t_125_; _t_174_ = -u1[i-2][j][k+1]; _t_78_ = u1[i-2][j][k+1]; _t_174_ += u1[i+2][j][k+1]; _t_171_ = c2 * _t_174_; _t_60_ = u1[i+2][j][k+1]; _t_78_ -= u1[i-2][j][k-1]; _t_74_ += c1 * _t_78_; _t_193_ = -u1[i-2][j][k-1]; _t_60_ -= u1[i+2][j][k-1]; _t_56_ += c1 * _t_60_; _t_193_ += u1[i+2][j][k-1]; _t_190_ = c2 * _t_193_; _t_175_ = -u1[i-1][j][k+1]; _t_115_ = u1[i-1][j][k+1]; _t_175_ += u1[i+1][j][k+1]; _t_171_ += c1 * _t_175_; _t_169_ = _t_170_ * _t_171_; _t_97_ = u1[i+1][j][k+1]; _t_168_ = _t_169_ * strx[i+2]; _t_167_ = _t_168_ * stry[j]; _t_115_ -= u1[i-1][j][k-1]; _t_111_ += c1 * _t_115_; _t_194_ = -u1[i-1][j][k-1]; _t_97_ -= u1[i+1][j][k-1]; _t_93_ += c1 * _t_97_; _t_194_ += u1[i+1][j][k-1]; _t_190_ += c1 * _t_194_; _t_188_ = _t_189_ * _t_190_; _t_187_ = _t_188_ * strx[i-2]; _t_167_ += _t_187_ * stry[j]; _t_179_ = -u2[i-2][j][k+1]; _t_84_ = u2[i-2][j][k+1]; _t_179_ += u2[i+2][j][k+1]; _t_177_ = c2 * _t_179_; _t_66_ = u2[i+2][j][k+1]; _t_84_ -= u2[i-2][j][k-1]; _t_81_ += c1 * _t_84_; _t_198_ = -u2[i-2][j][k-1]; _t_66_ -= u2[i+2][j][k-1]; _t_63_ += c1 * _t_66_; _t_198_ += u2[i+2][j][k-1]; _t_196_ = c2 * _t_198_; _t_180_ = -u2[i-1][j][k+1]; _t_121_ = u2[i-1][j][k+1]; _t_180_ += u2[i+1][j][k+1]; _t_177_ += c1 * _t_180_; _t_167_ += _t_176_ * _t_177_; _t_103_ = u2[i+1][j][k+1]; _t_121_ -= u2[i-1][j][k-1]; _t_118_ += c1 * _t_121_; _t_199_ = -u2[i-1][j][k-1]; _t_103_ -= u2[i+1][j][k-1]; _t_100_ += c1 * _t_103_; _t_199_ += u2[i+1][j][k-1]; _t_196_ += c1 * _t_199_; _t_167_ += _t_195_ * _t_196_; _t_185_ = -u3[i-2][j][k+1]; _t_89_ = u3[i-2][j][k+1]; _t_185_ += u3[i+2][j][k+1]; _t_183_ = c2 * _t_185_; _t_71_ = u3[i+2][j][k+1]; _t_89_ -= u3[i-2][j][k-1]; _t_86_ += c1 * _t_89_; _t_204_ = -u3[i-2][j][k-1]; _t_71_ -= u3[i+2][j][k-1]; _t_68_ += c1 * _t_71_; _t_204_ += u3[i+2][j][k-1]; _t_202_ = c2 * _t_204_; _t_186_ = -u3[i-1][j][k+1]; _t_126_ = u3[i-1][j][k+1]; _t_186_ += u3[i+1][j][k+1]; _t_183_ += c1 * _t_186_; _t_181_ = _t_182_ * _t_183_; _t_167_ += _t_181_ * stry[j]; _t_108_ = u3[i+1][j][k+1]; _t_126_ -= u3[i-1][j][k-1]; _t_123_ += c1 * _t_126_; _t_205_ = -u3[i-1][j][k-1]; _t_108_ -= u3[i+1][j][k-1]; _t_105_ += c1 * _t_108_; _t_205_ += u3[i+1][j][k-1]; _t_202_ += c1 * _t_205_; _t_200_ = _t_201_ * _t_202_; _t_167_ += _t_200_ * stry[j]; _t_127_ += c1 * _t_167_; r1ic0jc0kc0 += _t_127_; _t_113_ = 2.0 * mu[i-1][j][k]; _t_113_ += la[i-1][j][k]; _t_112_ = _t_113_ * met2[i-1][j][k]; _t_58_ = 2.0 * mu[i+2][j][k]; _t_58_ += la[i+2][j][k]; _t_57_ = _t_58_ * met2[i+2][j][k]; _t_76_ = 2.0 * mu[i-2][j][k]; _t_76_ += la[i-2][j][k]; _t_75_ = _t_76_ * met2[i-2][j][k]; _t_95_ = 2.0 * mu[i+1][j][k]; _t_95_ += la[i+1][j][k]; _t_94_ = _t_95_ * met2[i+1][j][k]; _t_110_ = _t_112_ * met1[i-1][j][k]; _t_109_ = _t_110_ * _t_111_; _t_90_ = _t_109_ * strx[i]; _t_119_ = la[i-1][j][k] * met3[i-1][j][k]; _t_117_ = _t_119_ * met1[i-1][j][k]; _t_116_ = _t_117_ * _t_118_; _t_90_ += _t_116_ * stry[j]; _t_124_ = la[i-1][j][k] * met4[i-1][j][k]; _t_122_ = _t_124_ * met1[i-1][j][k]; _t_90_ += _t_122_ * _t_123_; _t_55_ = _t_57_ * met1[i+2][j][k]; _t_54_ = _t_55_ * _t_56_; _t_53_ = _t_54_ * strx[i]; _t_64_ = la[i+2][j][k] * met3[i+2][j][k]; _t_62_ = _t_64_ * met1[i+2][j][k]; _t_61_ = _t_62_ * _t_63_; _t_53_ += _t_61_ * stry[j]; _t_69_ = la[i+2][j][k] * met4[i+2][j][k]; _t_67_ = _t_69_ * met1[i+2][j][k]; _t_53_ += _t_67_ * _t_68_; _t_73_ = _t_75_ * met1[i-2][j][k]; _t_72_ = _t_73_ * _t_74_; _t_53_ += _t_72_ * strx[i]; _t_82_ = la[i-2][j][k] * met3[i-2][j][k]; _t_80_ = _t_82_ * met1[i-2][j][k]; _t_79_ = 
_t_80_ * _t_81_; _t_53_ += _t_79_ * stry[j]; _t_87_ = la[i-2][j][k] * met4[i-2][j][k]; _t_85_ = _t_87_ * met1[i-2][j][k]; _t_53_ += _t_85_ * _t_86_; _t_52_ = c2 * _t_53_; _t_92_ = _t_94_ * met1[i+1][j][k]; _t_91_ = _t_92_ * _t_93_; _t_90_ += _t_91_ * strx[i]; _t_101_ = la[i+1][j][k] * met3[i+1][j][k]; _t_99_ = _t_101_ * met1[i+1][j][k]; _t_98_ = _t_99_ * _t_100_; _t_90_ += _t_98_ * stry[j]; _t_106_ = la[i+1][j][k] * met4[i+1][j][k]; _t_104_ = _t_106_ * met1[i+1][j][k]; _t_90_ += _t_104_ * _t_105_; _t_52_ += c1 * _t_90_; _t_51_ = _t_52_ * stry[j]; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0; } } } __global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_i= (int)(blockDim.z); int i0 = (int)(blockIdx.z)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.z); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) { double _t_12_; double _t_13_; double _t_10_; double _t_19_; double _t_17_; double _t_20_; double _t_24_; double _t_22_; double _t_25_; double _t_32_; double _t_30_; double _t_33_; double _t_37_; double _t_35_; double _t_38_; double _t_44_; double _t_42_; double _t_45_; double _t_49_; double _t_47_; double _t_50_; double _t_7_; double _t_5_; double _t_8_; double _t_23_; double _t_21_; double _t_1_; double _t_18_; double _t_16_; double _t_15_; double _t_60_; double _t_58_; double _t_14_; double _t_48_; double _t_46_; double _t_26_; double _t_43_; double _t_41_; double _t_40_; double _t_39_; double _t_71_; double _t_69_; double _t_36_; double _t_34_; double _t_31_; double _t_29_; double _t_28_; double _t_66_; double _t_64_; double _t_27_; double _t_0_; double _t_11_; double _t_9_; double _t_6_; double _t_4_; double _t_3_; double _t_55_; double _t_53_; double _t_2_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_56_; double _t_83_; double _t_61_; double _t_81_; double _t_78_; double _t_54_; double _t_59_; double _t_76_; double _t_57_; double _t_94_; double _t_62_; double _t_92_; double _t_52_; double _t_89_; double _t_87_; double _t_51_; double _t_79_; double _t_72_; double _t_84_; double _t_70_; double _t_67_; double _t_65_; double _t_68_; double _t_95_; double _t_73_; double _t_63_; double _t_90_; double _t_82_; double _t_80_; double _t_74_; double _t_93_; double _t_91_; double _t_85_; double _t_88_; double _t_86_; double _t_77_; double _t_75_; _t_12_ = -u2[i][j+2][k-2]; _t_12_ += u2[i][j+2][k+2]; _t_13_ = 
-u2[i][j+2][k-1]; _t_13_ += u2[i][j+2][k+1]; _t_10_ = c1 * _t_13_; _t_10_ += c2 * _t_12_; _t_19_ = -u1[i][j-2][k-2]; _t_19_ += u1[i][j-2][k+2]; _t_17_ = c2 * _t_19_; _t_20_ = -u1[i][j-2][k-1]; _t_20_ += u1[i][j-2][k+1]; _t_17_ += c1 * _t_20_; _t_24_ = -u2[i][j-2][k-2]; _t_24_ += u2[i][j-2][k+2]; _t_22_ = c2 * _t_24_; _t_25_ = -u2[i][j-2][k-1]; _t_25_ += u2[i][j-2][k+1]; _t_22_ += c1 * _t_25_; _t_32_ = -u1[i][j+1][k-2]; _t_32_ += u1[i][j+1][k+2]; _t_30_ = c2 * _t_32_; _t_33_ = -u1[i][j+1][k-1]; _t_33_ += u1[i][j+1][k+1]; _t_30_ += c1 * _t_33_; _t_37_ = -u2[i][j+1][k-2]; _t_37_ += u2[i][j+1][k+2]; _t_35_ = c2 * _t_37_; _t_38_ = -u2[i][j+1][k-1]; _t_38_ += u2[i][j+1][k+1]; _t_35_ += c1 * _t_38_; _t_44_ = -u1[i][j-1][k-2]; _t_44_ += u1[i][j-1][k+2]; _t_42_ = c2 * _t_44_; _t_45_ = -u1[i][j-1][k-1]; _t_45_ += u1[i][j-1][k+1]; _t_42_ += c1 * _t_45_; _t_49_ = -u2[i][j-1][k-2]; _t_49_ += u2[i][j-1][k+2]; _t_47_ = c2 * _t_49_; _t_50_ = -u2[i][j-1][k-1]; _t_50_ += u2[i][j-1][k+1]; _t_47_ += c1 * _t_50_; _t_7_ = -u1[i][j+2][k-2]; _t_7_ += u1[i][j+2][k+2]; _t_5_ = c2 * _t_7_; _t_8_ = -u1[i][j+2][k-1]; _t_8_ += u1[i][j+2][k+1]; _t_5_ += c1 * _t_8_; _t_23_ = mu[i][j-2][k] * met2[i][j-2][k]; _t_21_ = _t_23_ * met1[i][j-2][k]; _t_1_ = _t_21_ * _t_22_; _t_18_ = mu[i][j-2][k] * met3[i][j-2][k]; _t_16_ = _t_18_ * met1[i][j-2][k]; _t_15_ = _t_16_ * _t_17_; _t_60_ = mu[i][j-2][k] * met1[i][j-2][k]; _t_58_ = _t_60_ * met1[i][j-2][k]; _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; _t_48_ = mu[i][j-1][k] * met2[i][j-1][k]; _t_46_ = _t_48_ * met1[i][j-1][k]; _t_26_ = _t_46_ * _t_47_; _t_43_ = mu[i][j-1][k] * met3[i][j-1][k]; _t_41_ = _t_43_ * met1[i][j-1][k]; _t_40_ = _t_41_ * _t_42_; _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; _t_71_ = mu[i][j-1][k] * met1[i][j-1][k]; _t_69_ = _t_71_ * met1[i][j-1][k]; _t_36_ = mu[i][j+1][k] * met2[i][j+1][k]; _t_34_ = _t_36_ * met1[i][j+1][k]; _t_26_ += _t_34_ * _t_35_; _t_31_ = mu[i][j+1][k] * met3[i][j+1][k]; _t_29_ = _t_31_ * met1[i][j+1][k]; _t_28_ = _t_29_ * _t_30_; _t_66_ = mu[i][j+1][k] * met1[i][j+1][k]; _t_64_ = _t_66_ * met1[i][j+1][k]; _t_27_ = _t_28_ * stry[j-1]; _t_26_ += _t_27_ * strx[i]; _t_0_ = c1 * _t_26_; _t_11_ = mu[i][j+2][k] * met2[i][j+2][k]; _t_9_ = _t_11_ * met1[i][j+2][k]; _t_1_ += _t_9_ * _t_10_; _t_6_ = mu[i][j+2][k] * met3[i][j+2][k]; _t_4_ = _t_6_ * met1[i][j+2][k]; _t_3_ = _t_4_ * _t_5_; _t_55_ = mu[i][j+2][k] * met1[i][j+2][k]; _t_53_ = _t_55_ * met1[i][j+2][k]; _t_2_ = _t_3_ * stry[j+1]; _t_1_ += _t_2_ * strx[i]; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_56_ = -u2[i-2][j+2][k]; _t_83_ = u2[i-2][j+2][k]; _t_83_ -= u2[i-2][j-2][k]; _t_61_ = -u2[i-2][j-2][k]; _t_81_ = c2 * _t_83_; _t_61_ += u2[i+2][j-2][k]; _t_78_ = -u2[i+2][j-2][k]; _t_56_ += u2[i+2][j+2][k]; _t_78_ += u2[i+2][j+2][k]; _t_54_ = c2 * _t_56_; _t_59_ = c2 * _t_61_; _t_76_ = c2 * _t_78_; _t_57_ = -u2[i-1][j+2][k]; _t_94_ = u2[i-1][j+2][k]; _t_94_ -= u2[i-1][j-2][k]; _t_62_ = -u2[i-1][j-2][k]; _t_92_ = c2 * _t_94_; _t_62_ += u2[i+1][j-2][k]; _t_59_ += c1 * _t_62_; _t_52_ = _t_58_ * _t_59_; _t_89_ = -u2[i+1][j-2][k]; _t_57_ += u2[i+1][j+2][k]; _t_54_ += c1 * _t_57_; _t_52_ += _t_53_ * _t_54_; _t_89_ += u2[i+1][j+2][k]; _t_87_ = c2 * _t_89_; _t_51_ = c2 * _t_52_; _t_79_ = -u2[i+2][j-1][k]; _t_72_ = u2[i+2][j-1][k]; _t_72_ -= u2[i-2][j-1][k]; _t_84_ = -u2[i-2][j-1][k]; _t_70_ = c2 * _t_72_; _t_84_ += u2[i-2][j+1][k]; _t_81_ += c1 * _t_84_; _t_67_ = -u2[i-2][j+1][k]; _t_67_ += u2[i+2][j+1][k]; _t_79_ += u2[i+2][j+1][k]; _t_76_ += c1 * _t_79_; _t_65_ = c2 * _t_67_; _t_68_ 
= -u2[i-1][j+1][k]; _t_95_ = u2[i-1][j+1][k]; _t_95_ -= u2[i-1][j-1][k]; _t_92_ += c1 * _t_95_; _t_73_ = -u2[i-1][j-1][k]; _t_73_ += u2[i+1][j-1][k]; _t_70_ += c1 * _t_73_; _t_63_ = _t_69_ * _t_70_; _t_90_ = -u2[i+1][j-1][k]; _t_68_ += u2[i+1][j+1][k]; _t_65_ += c1 * _t_68_; _t_63_ += _t_64_ * _t_65_; _t_51_ += c1 * _t_63_; _t_90_ += u2[i+1][j+1][k]; _t_87_ += c1 * _t_90_; _t_82_ = la[i-2][j][k] * met1[i-2][j][k]; _t_80_ = _t_82_ * met1[i-2][j][k]; _t_74_ = _t_80_ * _t_81_; _t_93_ = la[i-1][j][k] * met1[i-1][j][k]; _t_91_ = _t_93_ * met1[i-1][j][k]; _t_85_ = _t_91_ * _t_92_; _t_88_ = la[i+1][j][k] * met1[i+1][j][k]; _t_86_ = _t_88_ * met1[i+1][j][k]; _t_85_ += _t_86_ * _t_87_; _t_51_ += c1 * _t_85_; _t_77_ = la[i+2][j][k] * met1[i+2][j][k]; _t_75_ = _t_77_ * met1[i+2][j][k]; _t_74_ += _t_75_ * _t_76_; _t_51_ += c2 * _t_74_; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0; } } extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) { double *r1; hipMalloc (&r1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for r1\n"); hipMemcpy (r1, h_r1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u1; hipMalloc (&u1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u1\n"); hipMemcpy (u1, h_u1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u2; hipMalloc (&u2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u2\n"); hipMemcpy (u2, h_u2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *u3; hipMalloc (&u3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u3\n"); hipMemcpy (u3, h_u3, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *mu; hipMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); hipMemcpy (mu, h_mu, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *la; hipMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); hipMemcpy (la, h_la, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *met1; hipMalloc (&met1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met1\n"); hipMemcpy (met1, h_met1, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *met2; hipMalloc (&met2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met2\n"); hipMemcpy (met2, h_met2, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *met3; hipMalloc (&met3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met3\n"); hipMemcpy (met3, h_met3, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *met4; hipMalloc (&met4, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met4\n"); hipMemcpy (met4, h_met4, sizeof(double)*N*N*N, hipMemcpyHostToDevice); double *strx; hipMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); hipMemcpy (strx, h_strx, sizeof(double)*N, hipMemcpyHostToDevice); double *stry; hipMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); hipMemcpy (stry, h_stry, sizeof(double)*N, hipMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); hipLaunchKernelGGL(( curvi_1) , dim3(gridconfig), dim3(blockconfig), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); dim3 
blockconfig_1 (16, 2, 2); dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z)); hipLaunchKernelGGL(( curvi_2) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); hipMemcpy (h_r1, r1, sizeof(double)*N*N*N, hipMemcpyDeviceToHost); /* release the device buffers so repeated calls do not leak device memory */ hipFree (r1); hipFree (u1); hipFree (u2); hipFree (u3); hipFree (mu); hipFree (la); hipFree (met1); hipFree (met2); hipFree (met3); hipFree (met4); hipFree (strx); hipFree (stry); }
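The two launches above size their grids with the ceil(N, block) helper used in this file, rounding N up to a whole number of blocks. As a quick illustration of what that produces (the ceil_div name, the standalone main, and the value N = 304 implied by the kernels' double(*)[304][304] casts are assumptions made for this sketch, not part of the original file):

#include <assert.h>
#define ceil_div(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)   /* same rounding as the file's ceil(a,b) */
int main (void)
{
  int N = 304;                       /* padded grid size implied by the [304][304] casts */
  assert (ceil_div (N, 16) == 19);   /* curvi_1: gridconfig.x for blockconfig (16, 8) */
  assert (ceil_div (N, 8)  == 38);   /* curvi_1: gridconfig.y */
  assert (ceil_div (N, 2)  == 152);  /* curvi_2: gridconfig_1.y and .z for blockconfig_1 (16, 2, 2) */
  return 0;
}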
06aa49ba0b9f5f4d06c648d0011aaf1a5627d044.cu
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3) { for (int i=2; i<=N-3; i++) { double _t_12_; double _t_13_; double _t_10_; double _t_19_; double _t_17_; double _t_20_; double _t_24_; double _t_22_; double _t_25_; double _t_32_; double _t_30_; double _t_33_; double _t_37_; double _t_35_; double _t_38_; double _t_44_; double _t_42_; double _t_45_; double _t_49_; double _t_47_; double _t_50_; double _t_7_; double _t_5_; double _t_8_; double _t_23_; double _t_153_; double _t_152_; double _t_21_; double _t_1_; double _t_150_; double _t_18_; double _t_16_; double _t_15_; double _t_158_; double _t_156_; double _t_14_; double _t_48_; double _t_192_; double _t_191_; double _t_46_; double _t_26_; double _t_189_; double _t_43_; double _t_41_; double _t_40_; double _t_39_; double _t_197_; double _t_195_; double _t_36_; double _t_173_; double _t_172_; double _t_34_; double _t_170_; double _t_31_; double _t_29_; double _t_28_; double _t_178_; double _t_176_; double _t_27_; double _t_0_; double _t_11_; double _t_134_; double _t_133_; double _t_9_; double _t_131_; double _t_6_; double _t_4_; double _t_3_; double _t_139_; double _t_137_; double _t_2_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_145_; double _t_143_; double _t_164_; double _t_162_; double _t_184_; double _t_182_; double _t_203_; double _t_201_; double _t_135_; double _t_77_; double _t_132_; double _t_59_; double _t_154_; double _t_151_; double _t_56_; double _t_74_; double _t_136_; double _t_114_; double _t_130_; double _t_129_; double _t_128_; double _t_96_; double _t_155_; double _t_149_; double _t_148_; double _t_111_; double _t_93_; double _t_140_; double _t_83_; double _t_138_; double _t_65_; double _t_159_; double _t_157_; double _t_63_; double _t_81_; double _t_141_; double _t_120_; double _t_102_; double _t_160_; double _t_100_; double _t_118_; double _t_146_; double _t_88_; double _t_144_; double _t_70_; double _t_165_; double _t_163_; double _t_68_; double _t_86_; 
double _t_147_; double _t_125_; double _t_142_; double _t_107_; double _t_166_; double _t_161_; double _t_127_; double _t_105_; double _t_123_; double _t_174_; double _t_78_; double _t_171_; double _t_60_; double _t_193_; double _t_190_; double _t_175_; double _t_115_; double _t_169_; double _t_97_; double _t_168_; double _t_167_; double _t_194_; double _t_188_; double _t_187_; double _t_179_; double _t_84_; double _t_177_; double _t_66_; double _t_198_; double _t_196_; double _t_180_; double _t_121_; double _t_103_; double _t_199_; double _t_185_; double _t_89_; double _t_183_; double _t_71_; double _t_204_; double _t_202_; double _t_186_; double _t_126_; double _t_181_; double _t_108_; double _t_205_; double _t_200_; double _t_113_; double _t_112_; double _t_58_; double _t_57_; double _t_76_; double _t_75_; double _t_95_; double _t_94_; double _t_110_; double _t_109_; double _t_90_; double _t_119_; double _t_117_; double _t_116_; double _t_124_; double _t_122_; double _t_55_; double _t_54_; double _t_53_; double _t_64_; double _t_62_; double _t_61_; double _t_69_; double _t_67_; double _t_73_; double _t_72_; double _t_82_; double _t_80_; double _t_79_; double _t_87_; double _t_85_; double _t_52_; double _t_92_; double _t_91_; double _t_101_; double _t_99_; double _t_98_; double _t_106_; double _t_104_; double _t_51_; _t_12_ = -u2[i][j-2][k+2]; _t_12_ += u2[i][j+2][k+2]; _t_13_ = -u2[i][j-1][k+2]; _t_13_ += u2[i][j+1][k+2]; _t_10_ = c1 * _t_13_; _t_10_ += c2 * _t_12_; _t_19_ = -u1[i][j-2][k-2]; _t_19_ += u1[i][j+2][k-2]; _t_17_ = c2 * _t_19_; _t_20_ = -u1[i][j-1][k-2]; _t_20_ += u1[i][j+1][k-2]; _t_17_ += c1 * _t_20_; _t_24_ = -u2[i][j-2][k-2]; _t_24_ += u2[i][j+2][k-2]; _t_22_ = c2 * _t_24_; _t_25_ = -u2[i][j-1][k-2]; _t_25_ += u2[i][j+1][k-2]; _t_22_ += c1 * _t_25_; _t_32_ = -u1[i][j-2][k+1]; _t_32_ += u1[i][j+2][k+1]; _t_30_ = c2 * _t_32_; _t_33_ = -u1[i][j-1][k+1]; _t_33_ += u1[i][j+1][k+1]; _t_30_ += c1 * _t_33_; _t_37_ = -u2[i][j-2][k+1]; _t_37_ += u2[i][j+2][k+1]; _t_35_ = c2 * _t_37_; _t_38_ = -u2[i][j-1][k+1]; _t_38_ += u2[i][j+1][k+1]; _t_35_ += c1 * _t_38_; _t_44_ = -u1[i][j-2][k-1]; _t_44_ += u1[i][j+2][k-1]; _t_42_ = c2 * _t_44_; _t_45_ = -u1[i][j-1][k-1]; _t_45_ += u1[i][j+1][k-1]; _t_42_ += c1 * _t_45_; _t_49_ = -u2[i][j-2][k-1]; _t_49_ += u2[i][j+2][k-1]; _t_47_ = c2 * _t_49_; _t_50_ = -u2[i][j-1][k-1]; _t_50_ += u2[i][j+1][k-1]; _t_47_ += c1 * _t_50_; _t_7_ = -u1[i][j-2][k+2]; _t_7_ += u1[i][j+2][k+2]; _t_5_ = c2 * _t_7_; _t_8_ = -u1[i][j-1][k+2]; _t_8_ += u1[i][j+1][k+2]; _t_5_ += c1 * _t_8_; _t_23_ = la[i][j][k-2] * met2[i][j][k-2]; _t_153_ = 2.0 * mu[i][j][k-2]; _t_153_ += la[i][j][k-2]; _t_152_ = _t_153_ * met2[i][j][k-2]; _t_21_ = _t_23_ * met1[i][j][k-2]; _t_1_ = _t_21_ * _t_22_; _t_150_ = _t_152_ * met1[i][j][k-2]; _t_18_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_16_ = _t_18_ * met1[i][j][k-2]; _t_15_ = _t_16_ * _t_17_; _t_158_ = mu[i][j][k-2] * met3[i][j][k-2]; _t_156_ = _t_158_ * met1[i][j][k-2]; _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; _t_48_ = la[i][j][k-1] * met2[i][j][k-1]; _t_192_ = 2.0 * mu[i][j][k-1]; _t_192_ += la[i][j][k-1]; _t_191_ = _t_192_ * met2[i][j][k-1]; _t_46_ = _t_48_ * met1[i][j][k-1]; _t_26_ = _t_46_ * _t_47_; _t_189_ = _t_191_ * met1[i][j][k-1]; _t_43_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_41_ = _t_43_ * met1[i][j][k-1]; _t_40_ = _t_41_ * _t_42_; _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; _t_197_ = mu[i][j][k-1] * met3[i][j][k-1]; _t_195_ = _t_197_ * met1[i][j][k-1]; _t_36_ = la[i][j][k+1] * met2[i][j][k+1]; _t_173_ = 2.0 
* mu[i][j][k+1]; _t_173_ += la[i][j][k+1]; _t_172_ = _t_173_ * met2[i][j][k+1]; _t_34_ = _t_36_ * met1[i][j][k+1]; _t_26_ += _t_34_ * _t_35_; _t_170_ = _t_172_ * met1[i][j][k+1]; _t_31_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_29_ = _t_31_ * met1[i][j][k+1]; _t_28_ = _t_29_ * _t_30_; _t_178_ = mu[i][j][k+1] * met3[i][j][k+1]; _t_176_ = _t_178_ * met1[i][j][k+1]; _t_27_ = _t_28_ * stry[j-2]; _t_26_ += _t_27_ * strx[i]; _t_0_ = c1 * _t_26_; _t_11_ = la[i][j][k+2] * met2[i][j][k+2]; _t_134_ = 2.0 * mu[i][j][k+2]; _t_134_ += la[i][j][k+2]; _t_133_ = _t_134_ * met2[i][j][k+2]; _t_9_ = _t_11_ * met1[i][j][k+2]; _t_1_ += _t_9_ * _t_10_; _t_131_ = _t_133_ * met1[i][j][k+2]; _t_6_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_4_ = _t_6_ * met1[i][j][k+2]; _t_3_ = _t_4_ * _t_5_; _t_139_ = mu[i][j][k+2] * met3[i][j][k+2]; _t_137_ = _t_139_ * met1[i][j][k+2]; _t_2_ = _t_3_ * stry[j+2]; _t_1_ += _t_2_ * strx[i]; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_145_ = mu[i][j][k+2] * met4[i][j][k+2]; _t_143_ = _t_145_ * met1[i][j][k+2]; _t_164_ = mu[i][j][k-2] * met4[i][j][k-2]; _t_162_ = _t_164_ * met1[i][j][k-2]; _t_184_ = mu[i][j][k+1] * met4[i][j][k+1]; _t_182_ = _t_184_ * met1[i][j][k+1]; _t_203_ = mu[i][j][k-1] * met4[i][j][k-1]; _t_201_ = _t_203_ * met1[i][j][k-1]; _t_135_ = -u1[i-2][j][k+2]; _t_77_ = u1[i-2][j][k+2]; _t_135_ += u1[i+2][j][k+2]; _t_132_ = c2 * _t_135_; _t_59_ = u1[i+2][j][k+2]; _t_77_ -= u1[i-2][j][k-2]; _t_154_ = -u1[i-2][j][k-2]; _t_59_ -= u1[i+2][j][k-2]; _t_154_ += u1[i+2][j][k-2]; _t_151_ = c2 * _t_154_; _t_56_ = c2 * _t_59_; _t_74_ = c2 * _t_77_; _t_136_ = -u1[i-1][j][k+2]; _t_114_ = u1[i-1][j][k+2]; _t_136_ += u1[i+1][j][k+2]; _t_132_ += c1 * _t_136_; _t_130_ = _t_131_ * _t_132_; _t_129_ = _t_130_ * strx[i]; _t_128_ = _t_129_ * stry[j]; _t_96_ = u1[i+1][j][k+2]; _t_114_ -= u1[i-1][j][k-2]; _t_155_ = -u1[i-1][j][k-2]; _t_96_ -= u1[i+1][j][k-2]; _t_155_ += u1[i+1][j][k-2]; _t_151_ += c1 * _t_155_; _t_149_ = _t_150_ * _t_151_; _t_148_ = _t_149_ * strx[i]; _t_128_ += _t_148_ * stry[j]; _t_111_ = c2 * _t_114_; _t_93_ = c2 * _t_96_; _t_140_ = -u2[i-2][j][k+2]; _t_83_ = u2[i-2][j][k+2]; _t_140_ += u2[i+2][j][k+2]; _t_138_ = c2 * _t_140_; _t_65_ = u2[i+2][j][k+2]; _t_83_ -= u2[i-2][j][k-2]; _t_159_ = -u2[i-2][j][k-2]; _t_65_ -= u2[i+2][j][k-2]; _t_159_ += u2[i+2][j][k-2]; _t_157_ = c2 * _t_159_; _t_63_ = c2 * _t_65_; _t_81_ = c2 * _t_83_; _t_141_ = -u2[i-1][j][k+2]; _t_120_ = u2[i-1][j][k+2]; _t_141_ += u2[i+1][j][k+2]; _t_138_ += c1 * _t_141_; _t_128_ += _t_137_ * _t_138_; _t_102_ = u2[i+1][j][k+2]; _t_120_ -= u2[i-1][j][k-2]; _t_160_ = -u2[i-1][j][k-2]; _t_102_ -= u2[i+1][j][k-2]; _t_160_ += u2[i+1][j][k-2]; _t_157_ += c1 * _t_160_; _t_128_ += _t_156_ * _t_157_; _t_100_ = c2 * _t_102_; _t_118_ = c2 * _t_120_; _t_146_ = -u3[i-2][j][k+2]; _t_88_ = u3[i-2][j][k+2]; _t_146_ += u3[i+2][j][k+2]; _t_144_ = c2 * _t_146_; _t_70_ = u3[i+2][j][k+2]; _t_88_ -= u3[i-2][j][k-2]; _t_165_ = -u3[i-2][j][k-2]; _t_70_ -= u3[i+2][j][k-2]; _t_165_ += u3[i+2][j][k-2]; _t_163_ = c2 * _t_165_; _t_68_ = c2 * _t_70_; _t_86_ = c2 * _t_88_; _t_147_ = -u3[i-1][j][k+2]; _t_125_ = u3[i-1][j][k+2]; _t_147_ += u3[i+1][j][k+2]; _t_144_ += c1 * _t_147_; _t_142_ = _t_143_ * _t_144_; _t_128_ += _t_142_ * stry[j]; _t_107_ = u3[i+1][j][k+2]; _t_125_ -= u3[i-1][j][k-2]; _t_166_ = -u3[i-1][j][k-2]; _t_107_ -= u3[i+1][j][k-2]; _t_166_ += u3[i+1][j][k-2]; _t_163_ += c1 * _t_166_; _t_161_ = _t_162_ * _t_163_; _t_128_ += _t_161_ * stry[j]; _t_127_ = c2 * _t_128_; _t_105_ = c2 * _t_107_; _t_123_ = c2 * _t_125_; _t_174_ = 
-u1[i-2][j][k+1]; _t_78_ = u1[i-2][j][k+1]; _t_174_ += u1[i+2][j][k+1]; _t_171_ = c2 * _t_174_; _t_60_ = u1[i+2][j][k+1]; _t_78_ -= u1[i-2][j][k-1]; _t_74_ += c1 * _t_78_; _t_193_ = -u1[i-2][j][k-1]; _t_60_ -= u1[i+2][j][k-1]; _t_56_ += c1 * _t_60_; _t_193_ += u1[i+2][j][k-1]; _t_190_ = c2 * _t_193_; _t_175_ = -u1[i-1][j][k+1]; _t_115_ = u1[i-1][j][k+1]; _t_175_ += u1[i+1][j][k+1]; _t_171_ += c1 * _t_175_; _t_169_ = _t_170_ * _t_171_; _t_97_ = u1[i+1][j][k+1]; _t_168_ = _t_169_ * strx[i+2]; _t_167_ = _t_168_ * stry[j]; _t_115_ -= u1[i-1][j][k-1]; _t_111_ += c1 * _t_115_; _t_194_ = -u1[i-1][j][k-1]; _t_97_ -= u1[i+1][j][k-1]; _t_93_ += c1 * _t_97_; _t_194_ += u1[i+1][j][k-1]; _t_190_ += c1 * _t_194_; _t_188_ = _t_189_ * _t_190_; _t_187_ = _t_188_ * strx[i-2]; _t_167_ += _t_187_ * stry[j]; _t_179_ = -u2[i-2][j][k+1]; _t_84_ = u2[i-2][j][k+1]; _t_179_ += u2[i+2][j][k+1]; _t_177_ = c2 * _t_179_; _t_66_ = u2[i+2][j][k+1]; _t_84_ -= u2[i-2][j][k-1]; _t_81_ += c1 * _t_84_; _t_198_ = -u2[i-2][j][k-1]; _t_66_ -= u2[i+2][j][k-1]; _t_63_ += c1 * _t_66_; _t_198_ += u2[i+2][j][k-1]; _t_196_ = c2 * _t_198_; _t_180_ = -u2[i-1][j][k+1]; _t_121_ = u2[i-1][j][k+1]; _t_180_ += u2[i+1][j][k+1]; _t_177_ += c1 * _t_180_; _t_167_ += _t_176_ * _t_177_; _t_103_ = u2[i+1][j][k+1]; _t_121_ -= u2[i-1][j][k-1]; _t_118_ += c1 * _t_121_; _t_199_ = -u2[i-1][j][k-1]; _t_103_ -= u2[i+1][j][k-1]; _t_100_ += c1 * _t_103_; _t_199_ += u2[i+1][j][k-1]; _t_196_ += c1 * _t_199_; _t_167_ += _t_195_ * _t_196_; _t_185_ = -u3[i-2][j][k+1]; _t_89_ = u3[i-2][j][k+1]; _t_185_ += u3[i+2][j][k+1]; _t_183_ = c2 * _t_185_; _t_71_ = u3[i+2][j][k+1]; _t_89_ -= u3[i-2][j][k-1]; _t_86_ += c1 * _t_89_; _t_204_ = -u3[i-2][j][k-1]; _t_71_ -= u3[i+2][j][k-1]; _t_68_ += c1 * _t_71_; _t_204_ += u3[i+2][j][k-1]; _t_202_ = c2 * _t_204_; _t_186_ = -u3[i-1][j][k+1]; _t_126_ = u3[i-1][j][k+1]; _t_186_ += u3[i+1][j][k+1]; _t_183_ += c1 * _t_186_; _t_181_ = _t_182_ * _t_183_; _t_167_ += _t_181_ * stry[j]; _t_108_ = u3[i+1][j][k+1]; _t_126_ -= u3[i-1][j][k-1]; _t_123_ += c1 * _t_126_; _t_205_ = -u3[i-1][j][k-1]; _t_108_ -= u3[i+1][j][k-1]; _t_105_ += c1 * _t_108_; _t_205_ += u3[i+1][j][k-1]; _t_202_ += c1 * _t_205_; _t_200_ = _t_201_ * _t_202_; _t_167_ += _t_200_ * stry[j]; _t_127_ += c1 * _t_167_; r1ic0jc0kc0 += _t_127_; _t_113_ = 2.0 * mu[i-1][j][k]; _t_113_ += la[i-1][j][k]; _t_112_ = _t_113_ * met2[i-1][j][k]; _t_58_ = 2.0 * mu[i+2][j][k]; _t_58_ += la[i+2][j][k]; _t_57_ = _t_58_ * met2[i+2][j][k]; _t_76_ = 2.0 * mu[i-2][j][k]; _t_76_ += la[i-2][j][k]; _t_75_ = _t_76_ * met2[i-2][j][k]; _t_95_ = 2.0 * mu[i+1][j][k]; _t_95_ += la[i+1][j][k]; _t_94_ = _t_95_ * met2[i+1][j][k]; _t_110_ = _t_112_ * met1[i-1][j][k]; _t_109_ = _t_110_ * _t_111_; _t_90_ = _t_109_ * strx[i]; _t_119_ = la[i-1][j][k] * met3[i-1][j][k]; _t_117_ = _t_119_ * met1[i-1][j][k]; _t_116_ = _t_117_ * _t_118_; _t_90_ += _t_116_ * stry[j]; _t_124_ = la[i-1][j][k] * met4[i-1][j][k]; _t_122_ = _t_124_ * met1[i-1][j][k]; _t_90_ += _t_122_ * _t_123_; _t_55_ = _t_57_ * met1[i+2][j][k]; _t_54_ = _t_55_ * _t_56_; _t_53_ = _t_54_ * strx[i]; _t_64_ = la[i+2][j][k] * met3[i+2][j][k]; _t_62_ = _t_64_ * met1[i+2][j][k]; _t_61_ = _t_62_ * _t_63_; _t_53_ += _t_61_ * stry[j]; _t_69_ = la[i+2][j][k] * met4[i+2][j][k]; _t_67_ = _t_69_ * met1[i+2][j][k]; _t_53_ += _t_67_ * _t_68_; _t_73_ = _t_75_ * met1[i-2][j][k]; _t_72_ = _t_73_ * _t_74_; _t_53_ += _t_72_ * strx[i]; _t_82_ = la[i-2][j][k] * met3[i-2][j][k]; _t_80_ = _t_82_ * met1[i-2][j][k]; _t_79_ = _t_80_ * _t_81_; _t_53_ += _t_79_ * stry[j]; _t_87_ = 
la[i-2][j][k] * met4[i-2][j][k]; _t_85_ = _t_87_ * met1[i-2][j][k]; _t_53_ += _t_85_ * _t_86_; _t_52_ = c2 * _t_53_; _t_92_ = _t_94_ * met1[i+1][j][k]; _t_91_ = _t_92_ * _t_93_; _t_90_ += _t_91_ * strx[i]; _t_101_ = la[i+1][j][k] * met3[i+1][j][k]; _t_99_ = _t_101_ * met1[i+1][j][k]; _t_98_ = _t_99_ * _t_100_; _t_90_ += _t_98_ * stry[j]; _t_106_ = la[i+1][j][k] * met4[i+1][j][k]; _t_104_ = _t_106_ * met1[i+1][j][k]; _t_90_ += _t_104_ * _t_105_; _t_52_ += c1 * _t_90_; _t_51_ = _t_52_ * stry[j]; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0; } } } __global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) { //Determing the block's indices int blockdim_k= (int)(blockDim.x); int k0 = (int)(blockIdx.x)*(blockdim_k); int k = max (k0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); int blockdim_i= (int)(blockDim.z); int i0 = (int)(blockIdx.z)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.z); double (*u1)[304][304] = (double (*)[304][304])in_u1; double (*u2)[304][304] = (double (*)[304][304])in_u2; double (*u3)[304][304] = (double (*)[304][304])in_u3; double (*mu)[304][304] = (double (*)[304][304])in_mu; double (*la)[304][304] = (double (*)[304][304])in_la; double (*r1)[304][304] = (double (*)[304][304])in_r1; double (*met1)[304][304] = (double (*)[304][304])in_met1; double (*met2)[304][304] = (double (*)[304][304])in_met2; double (*met3)[304][304] = (double (*)[304][304])in_met3; double (*met4)[304][304] = (double (*)[304][304])in_met4; if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) { double _t_12_; double _t_13_; double _t_10_; double _t_19_; double _t_17_; double _t_20_; double _t_24_; double _t_22_; double _t_25_; double _t_32_; double _t_30_; double _t_33_; double _t_37_; double _t_35_; double _t_38_; double _t_44_; double _t_42_; double _t_45_; double _t_49_; double _t_47_; double _t_50_; double _t_7_; double _t_5_; double _t_8_; double _t_23_; double _t_21_; double _t_1_; double _t_18_; double _t_16_; double _t_15_; double _t_60_; double _t_58_; double _t_14_; double _t_48_; double _t_46_; double _t_26_; double _t_43_; double _t_41_; double _t_40_; double _t_39_; double _t_71_; double _t_69_; double _t_36_; double _t_34_; double _t_31_; double _t_29_; double _t_28_; double _t_66_; double _t_64_; double _t_27_; double _t_0_; double _t_11_; double _t_9_; double _t_6_; double _t_4_; double _t_3_; double _t_55_; double _t_53_; double _t_2_; double r1ic0jc0kc0 = r1[i][j][k]; double _t_56_; double _t_83_; double _t_61_; double _t_81_; double _t_78_; double _t_54_; double _t_59_; double _t_76_; double _t_57_; double _t_94_; double _t_62_; double _t_92_; double _t_52_; double _t_89_; double _t_87_; double _t_51_; double _t_79_; double _t_72_; double _t_84_; double _t_70_; double _t_67_; double _t_65_; double _t_68_; double _t_95_; double _t_73_; double _t_63_; double _t_90_; double _t_82_; double _t_80_; double _t_74_; double _t_93_; double _t_91_; double _t_85_; double _t_88_; double _t_86_; double _t_77_; double _t_75_; _t_12_ = -u2[i][j+2][k-2]; _t_12_ += u2[i][j+2][k+2]; _t_13_ = -u2[i][j+2][k-1]; _t_13_ += u2[i][j+2][k+1]; _t_10_ = c1 * _t_13_; 
_t_10_ += c2 * _t_12_; _t_19_ = -u1[i][j-2][k-2]; _t_19_ += u1[i][j-2][k+2]; _t_17_ = c2 * _t_19_; _t_20_ = -u1[i][j-2][k-1]; _t_20_ += u1[i][j-2][k+1]; _t_17_ += c1 * _t_20_; _t_24_ = -u2[i][j-2][k-2]; _t_24_ += u2[i][j-2][k+2]; _t_22_ = c2 * _t_24_; _t_25_ = -u2[i][j-2][k-1]; _t_25_ += u2[i][j-2][k+1]; _t_22_ += c1 * _t_25_; _t_32_ = -u1[i][j+1][k-2]; _t_32_ += u1[i][j+1][k+2]; _t_30_ = c2 * _t_32_; _t_33_ = -u1[i][j+1][k-1]; _t_33_ += u1[i][j+1][k+1]; _t_30_ += c1 * _t_33_; _t_37_ = -u2[i][j+1][k-2]; _t_37_ += u2[i][j+1][k+2]; _t_35_ = c2 * _t_37_; _t_38_ = -u2[i][j+1][k-1]; _t_38_ += u2[i][j+1][k+1]; _t_35_ += c1 * _t_38_; _t_44_ = -u1[i][j-1][k-2]; _t_44_ += u1[i][j-1][k+2]; _t_42_ = c2 * _t_44_; _t_45_ = -u1[i][j-1][k-1]; _t_45_ += u1[i][j-1][k+1]; _t_42_ += c1 * _t_45_; _t_49_ = -u2[i][j-1][k-2]; _t_49_ += u2[i][j-1][k+2]; _t_47_ = c2 * _t_49_; _t_50_ = -u2[i][j-1][k-1]; _t_50_ += u2[i][j-1][k+1]; _t_47_ += c1 * _t_50_; _t_7_ = -u1[i][j+2][k-2]; _t_7_ += u1[i][j+2][k+2]; _t_5_ = c2 * _t_7_; _t_8_ = -u1[i][j+2][k-1]; _t_8_ += u1[i][j+2][k+1]; _t_5_ += c1 * _t_8_; _t_23_ = mu[i][j-2][k] * met2[i][j-2][k]; _t_21_ = _t_23_ * met1[i][j-2][k]; _t_1_ = _t_21_ * _t_22_; _t_18_ = mu[i][j-2][k] * met3[i][j-2][k]; _t_16_ = _t_18_ * met1[i][j-2][k]; _t_15_ = _t_16_ * _t_17_; _t_60_ = mu[i][j-2][k] * met1[i][j-2][k]; _t_58_ = _t_60_ * met1[i][j-2][k]; _t_14_ = _t_15_ * stry[j]; _t_1_ += _t_14_ * strx[i]; _t_48_ = mu[i][j-1][k] * met2[i][j-1][k]; _t_46_ = _t_48_ * met1[i][j-1][k]; _t_26_ = _t_46_ * _t_47_; _t_43_ = mu[i][j-1][k] * met3[i][j-1][k]; _t_41_ = _t_43_ * met1[i][j-1][k]; _t_40_ = _t_41_ * _t_42_; _t_39_ = _t_40_ * stry[j]; _t_26_ += _t_39_ * strx[i]; _t_71_ = mu[i][j-1][k] * met1[i][j-1][k]; _t_69_ = _t_71_ * met1[i][j-1][k]; _t_36_ = mu[i][j+1][k] * met2[i][j+1][k]; _t_34_ = _t_36_ * met1[i][j+1][k]; _t_26_ += _t_34_ * _t_35_; _t_31_ = mu[i][j+1][k] * met3[i][j+1][k]; _t_29_ = _t_31_ * met1[i][j+1][k]; _t_28_ = _t_29_ * _t_30_; _t_66_ = mu[i][j+1][k] * met1[i][j+1][k]; _t_64_ = _t_66_ * met1[i][j+1][k]; _t_27_ = _t_28_ * stry[j-1]; _t_26_ += _t_27_ * strx[i]; _t_0_ = c1 * _t_26_; _t_11_ = mu[i][j+2][k] * met2[i][j+2][k]; _t_9_ = _t_11_ * met1[i][j+2][k]; _t_1_ += _t_9_ * _t_10_; _t_6_ = mu[i][j+2][k] * met3[i][j+2][k]; _t_4_ = _t_6_ * met1[i][j+2][k]; _t_3_ = _t_4_ * _t_5_; _t_55_ = mu[i][j+2][k] * met1[i][j+2][k]; _t_53_ = _t_55_ * met1[i][j+2][k]; _t_2_ = _t_3_ * stry[j+1]; _t_1_ += _t_2_ * strx[i]; _t_0_ += c2 * _t_1_; r1ic0jc0kc0 += _t_0_; _t_56_ = -u2[i-2][j+2][k]; _t_83_ = u2[i-2][j+2][k]; _t_83_ -= u2[i-2][j-2][k]; _t_61_ = -u2[i-2][j-2][k]; _t_81_ = c2 * _t_83_; _t_61_ += u2[i+2][j-2][k]; _t_78_ = -u2[i+2][j-2][k]; _t_56_ += u2[i+2][j+2][k]; _t_78_ += u2[i+2][j+2][k]; _t_54_ = c2 * _t_56_; _t_59_ = c2 * _t_61_; _t_76_ = c2 * _t_78_; _t_57_ = -u2[i-1][j+2][k]; _t_94_ = u2[i-1][j+2][k]; _t_94_ -= u2[i-1][j-2][k]; _t_62_ = -u2[i-1][j-2][k]; _t_92_ = c2 * _t_94_; _t_62_ += u2[i+1][j-2][k]; _t_59_ += c1 * _t_62_; _t_52_ = _t_58_ * _t_59_; _t_89_ = -u2[i+1][j-2][k]; _t_57_ += u2[i+1][j+2][k]; _t_54_ += c1 * _t_57_; _t_52_ += _t_53_ * _t_54_; _t_89_ += u2[i+1][j+2][k]; _t_87_ = c2 * _t_89_; _t_51_ = c2 * _t_52_; _t_79_ = -u2[i+2][j-1][k]; _t_72_ = u2[i+2][j-1][k]; _t_72_ -= u2[i-2][j-1][k]; _t_84_ = -u2[i-2][j-1][k]; _t_70_ = c2 * _t_72_; _t_84_ += u2[i-2][j+1][k]; _t_81_ += c1 * _t_84_; _t_67_ = -u2[i-2][j+1][k]; _t_67_ += u2[i+2][j+1][k]; _t_79_ += u2[i+2][j+1][k]; _t_76_ += c1 * _t_79_; _t_65_ = c2 * _t_67_; _t_68_ = -u2[i-1][j+1][k]; _t_95_ = u2[i-1][j+1][k]; _t_95_ -= 
u2[i-1][j-1][k]; _t_92_ += c1 * _t_95_; _t_73_ = -u2[i-1][j-1][k]; _t_73_ += u2[i+1][j-1][k]; _t_70_ += c1 * _t_73_; _t_63_ = _t_69_ * _t_70_; _t_90_ = -u2[i+1][j-1][k]; _t_68_ += u2[i+1][j+1][k]; _t_65_ += c1 * _t_68_; _t_63_ += _t_64_ * _t_65_; _t_51_ += c1 * _t_63_; _t_90_ += u2[i+1][j+1][k]; _t_87_ += c1 * _t_90_; _t_82_ = la[i-2][j][k] * met1[i-2][j][k]; _t_80_ = _t_82_ * met1[i-2][j][k]; _t_74_ = _t_80_ * _t_81_; _t_93_ = la[i-1][j][k] * met1[i-1][j][k]; _t_91_ = _t_93_ * met1[i-1][j][k]; _t_85_ = _t_91_ * _t_92_; _t_88_ = la[i+1][j][k] * met1[i+1][j][k]; _t_86_ = _t_88_ * met1[i+1][j][k]; _t_85_ += _t_86_ * _t_87_; _t_51_ += c1 * _t_85_; _t_77_ = la[i+2][j][k] * met1[i+2][j][k]; _t_75_ = _t_77_ * met1[i+2][j][k]; _t_74_ += _t_75_ * _t_76_; _t_51_ += c2 * _t_74_; r1ic0jc0kc0 += _t_51_; r1[i][j][k] = r1ic0jc0kc0; } } extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) { double *r1; cudaMalloc (&r1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for r1\n"); cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u1; cudaMalloc (&u1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u1\n"); cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u2; cudaMalloc (&u2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u2\n"); cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u3; cudaMalloc (&u3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u3\n"); cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met1; cudaMalloc (&met1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met1\n"); cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met2; cudaMalloc (&met2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met2\n"); cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met3; cudaMalloc (&met3, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met3\n"); cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *met4; cudaMalloc (&met4, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for met4\n"); cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); dim3 blockconfig_1 (16, 2, 2); dim3 gridconfig_1 (ceil(N, 
blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z)); curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N); cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); /* release the device buffers so repeated calls do not leak device memory */ cudaFree (r1); cudaFree (u1); cudaFree (u2); cudaFree (u3); cudaFree (mu); cudaFree (la); cudaFree (met1); cudaFree (met2); cudaFree (met3); cudaFree (met4); cudaFree (strx); cudaFree (stry); }
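host_code above takes twelve flat host arrays and hands the result back in h_r1; since both kernels reinterpret every 3-D field as double(*)[304][304], the N it is called with is effectively fixed at 304. A minimal calling sketch under that assumption; the main function, the placeholder fill values, and the fourth-order difference weights passed as c1/c2 are illustrative choices, not taken from the original harness:

#include <stdlib.h>

extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N);

int main (void)
{
  const int N = 304;                               /* leading dimension hard-coded in the kernels */
  const size_t vol = (size_t) N * N * N;
  double *f[10];                                   /* r1, u1, u2, u3, mu, la, met1..met4 */
  for (int a = 0; a < 10; a++) f[a] = (double *) calloc (vol, sizeof (double));
  double *strx = (double *) calloc (N, sizeof (double));
  double *stry = (double *) calloc (N, sizeof (double));
  for (int a = 1; a < 10; a++)
    for (size_t p = 0; p < vol; p++) f[a][p] = 1.0;   /* placeholder input data */
  for (int p = 0; p < N; p++) strx[p] = stry[p] = 1.0;
  host_code (f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8], f[9], strx, stry, 2.0 / 3.0, -1.0 / 12.0, N);
  for (int a = 0; a < 10; a++) free (f[a]);
  free (strx); free (stry);
  return 0;
}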
d1c4719c61eec2d06218c36fd4dbe8031cb170dc.hip
// !!! This is a file automatically generated by hipify!!! #include "THHApply.cuh" #include "TH/THHalf.h" #include "THHNumerics.cuh" #include "THHTensorCopy.hpp" #include <type_traits> inline int curGPU() { int curDev; THCudaCheck(hipGetDevice(&curDev)); return curDev; } // Copy operator for the pointwise apply kernel template <typename TypeDst, typename TypeSrc> struct CopyOp { __device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) { #if __CUDA_ARCH__ >= 350 *dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src)); #else *dst = ScalarConvert<TypeSrc, TypeDst>::to(*src); #endif } }; // Copy for the same type to the same type template <typename ScalarTypeDst, typename ScalarTypeSrc> void THC_copyTensor(THCState* state, THCTensor* dst, THCTensor* src) { ptrdiff_t totalElements = THCTensor_nElement(state, dst); THArgCheck(totalElements == THCTensor_nElement(state, src), 2, "sizes do not match"); if (THCTensor_nDimensionLegacyAll(state, dst) == 0) { // Zero-dim tensor; copy nothing return; } // We can memcpy the memory if: // -both tensors are contiguous; or, // -there is only one element to copy; or, // -FIXME: if both tensors have matching size and stride arrays, and no // holes within (in other words, there is some permutation that can be applied // to the size/strides such that the resulting tensor is // contiguous). // -AND: both tensors have the same type. bool sameType = std::is_same<ScalarTypeDst, ScalarTypeSrc>::value; bool srcContig = src->is_contiguous(); bool dstContig = dst->is_contiguous(); bool memcpyEligible = ((srcContig && dstContig) || (totalElements == 1)) && sameType; int srcDev = THCTensor_getDevice(state, src); int dstDev = THCTensor_getDevice(state, dst); int oldDev = curGPU(); // Try to enable p2p access. This also handles the case srcDev == dstDev. bool p2pEnabled = THCState_getPeerToPeerAccess(state, srcDev, dstDev); // We always perform the copy on the source device, using the // current stream on the source device. // If the copy is on the default stream, then we fully synchronize // both src and dst's default streams for completion of the // copy. We have to explicitly do this for non-contig copies. // This mimics the behavior of cross-device hipMemcpyAsync on // the default stream. // If the copy is not on the default stream, then it is up to the // user to add needed synchronization on the dst device, since the // stream on the dst device that wishes to synchronize may not be // the same index as the one on the src device. hipStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev); if (srcDev != dstDev && copyStream == NULL) { // This is a cross-device copy on the default stream. We perform a // two-way barrier between both devices' default streams before // the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are // handled, so that no one is operating on the dst memory when // we perform the copy. 
// src waits on dst barrier (src already waits on src) hipEvent_t dstReady; THCudaCheck(hipSetDevice(dstDev)); THCudaCheck(hipEventCreateWithFlags(&dstReady, hipEventDisableTiming)); THCudaCheck(hipEventRecord(dstReady, NULL)); THCudaCheck(hipSetDevice(srcDev)); THCudaCheck(hipStreamWaitEvent(NULL, dstReady, 0)); THCudaCheck(hipEventDestroy(dstReady)); } else if (srcDev != oldDev) { THCudaCheck(hipSetDevice(srcDev)); } // We are now on srcDev if (memcpyEligible) { // Perform the copy THCudaCheck(hipMemcpyAsync( dst->template data<ScalarTypeDst>(), src->template data<ScalarTypeSrc>(), totalElements * sizeof(ScalarTypeDst), hipMemcpyDeviceToDevice, copyStream)); } else { // Non-contiguous copy or a type-conversion copy // We avoid creating temporary memory copies if possible. // If both src and dst are on the same device, or if they are on // different devices and p2p access is enabled, perform the copy // by a pointwise copy kernel. // Otherwise, we'll have to make contiguous (which will in fact // invoke copy() again), and then perform the copy. // FIXME: might want to consider only running the pointwise kernel // if both src and dst innermost dimensions are contiguous. If // they are not, then taking the hit of the memory allocation/free // might be worth it to avoid non-coalesced reads or writes. if (p2pEnabled) { bool succ = THC_pointwiseApply2<ScalarTypeDst, ScalarTypeSrc>( state, dst, src, CopyOp<ScalarTypeDst, ScalarTypeSrc>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } else { // GPUs can't access each other directly, but the tensors // involved are non-contiguous and/or are different types. // Make sure the src is contiguous and in the same type as dst THCudaCheck(hipSetDevice(srcDev)); THCTensor* srcContig = NULL; if (sameType) { srcContig = THCTensor_newContiguous<ScalarTypeSrc>(state, src); } else { // Types are different // Copy into the new format, contiguous, on the source device srcContig = THCTensor_new(state, caffe2::TypeMeta::Make<ScalarTypeDst>()); THCTensor_resizeAs(state, srcContig, dst); bool succ = THC_pointwiseApply2<ScalarTypeDst, ScalarTypeSrc>( state, srcContig, src, CopyOp<ScalarTypeDst, ScalarTypeSrc>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } // Make sure the dst is contiguous THCudaCheck(hipSetDevice(dstDev)); THCTensor* dstContig = THCTensor_newContiguous<ScalarTypeDst>(state, dst); // Now, we are ready for a cross-device memcpy of contiguous // data, of the same layout and type THCudaCheck(hipSetDevice(srcDev)); THCudaCheck(hipMemcpyAsync( dstContig->template data<ScalarTypeDst>(), srcContig->template data<ScalarTypeDst>(), totalElements * sizeof(ScalarTypeDst), hipMemcpyDeviceToDevice, copyStream)); // We are done with the src THCTensor_free(state, srcContig); if (dst != dstContig) { THCTensor_freeCopyTo<ScalarTypeDst>(state, dstContig, dst); } else { THCTensor_free(state, dstContig); } // We're still on srcDev at this point } } if (srcDev != dstDev && copyStream == NULL) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on srcDev, record default stream event hipEvent_t srcReady; THCudaCheck(hipEventCreateWithFlags(&srcReady, hipEventDisableTiming)); THCudaCheck(hipEventRecord(srcReady, NULL)); THCudaCheck(hipSetDevice(dstDev)); THCudaCheck(hipStreamWaitEvent(NULL, srcReady, 0)); THCudaCheck(hipEventDestroy(srcReady)); // We are now on dstDev (right above). 
// Restore prior device from dst if (dstDev != oldDev) { THCudaCheck(hipSetDevice(oldDev)); } } else { // We are still on srcDev. Restore prior device from src if (srcDev != oldDev) { THCudaCheck(hipSetDevice(oldDev)); } } THCudaCheck(hipGetLastError()); } #include "generic/THCTensorCopy.cu" #include "THHGenerateAllTypes.h"
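When the two tensors live on different devices and the copy runs on the default stream, THC_copyTensor above inserts a two-way barrier between the devices' default streams using a timing-disabled event instead of synchronizing either device. Stripped of the THCudaCheck wrappers, the "src waits on dst" half of that handshake looks like the sketch below (CUDA runtime spelling; the HIP build uses the identical calls with the hip prefix; the helper name and the omitted error checking are simplifications made here):

#include <cuda_runtime.h>

/* Make srcDev's default stream wait for all work already queued on dstDev's
   default stream, without blocking the host. */
static void defaultStreamBarrier (int srcDev, int dstDev)
{
  cudaEvent_t dstReady;
  cudaSetDevice (dstDev);
  cudaEventCreateWithFlags (&dstReady, cudaEventDisableTiming);
  cudaEventRecord (dstReady, 0);            /* capture dstDev's default stream */
  cudaSetDevice (srcDev);
  cudaStreamWaitEvent (0, dstReady, 0);     /* srcDev's default stream waits on it */
  cudaEventDestroy (dstReady);              /* safe: the pending wait keeps the event usable until it fires */
}

THC_copyTensor runs this shape of barrier before the copy and mirrors it afterwards with the roles swapped, which is what makes the default-stream path order like a cross-device memcpy issued with cudaMemcpyAsync.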
d1c4719c61eec2d06218c36fd4dbe8031cb170dc.cu
#include "THCApply.cuh" #include "TH/THHalf.h" #include "THCNumerics.cuh" #include "THCTensorCopy.hpp" #include <type_traits> inline int curGPU() { int curDev; THCudaCheck(cudaGetDevice(&curDev)); return curDev; } // Copy operator for the pointwise apply kernel template <typename TypeDst, typename TypeSrc> struct CopyOp { __device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) { #if __CUDA_ARCH__ >= 350 *dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src)); #else *dst = ScalarConvert<TypeSrc, TypeDst>::to(*src); #endif } }; // Copy for the same type to the same type template <typename ScalarTypeDst, typename ScalarTypeSrc> void THC_copyTensor(THCState* state, THCTensor* dst, THCTensor* src) { ptrdiff_t totalElements = THCTensor_nElement(state, dst); THArgCheck(totalElements == THCTensor_nElement(state, src), 2, "sizes do not match"); if (THCTensor_nDimensionLegacyAll(state, dst) == 0) { // Zero-dim tensor; copy nothing return; } // We can memcpy the memory if: // -both tensors are contiguous; or, // -there is only one element to copy; or, // -FIXME: if both tensors have matching size and stride arrays, and no // holes within (in other words, there is some permutation that can be applied // to the size/strides such that the resulting tensor is // contiguous). // -AND: both tensors have the same type. bool sameType = std::is_same<ScalarTypeDst, ScalarTypeSrc>::value; bool srcContig = src->is_contiguous(); bool dstContig = dst->is_contiguous(); bool memcpyEligible = ((srcContig && dstContig) || (totalElements == 1)) && sameType; int srcDev = THCTensor_getDevice(state, src); int dstDev = THCTensor_getDevice(state, dst); int oldDev = curGPU(); // Try to enable p2p access. This also handles the case srcDev == dstDev. bool p2pEnabled = THCState_getPeerToPeerAccess(state, srcDev, dstDev); // We always perform the copy on the source device, using the // current stream on the source device. // If the copy is on the default stream, then we fully synchronize // both src and dst's default streams for completion of the // copy. We have to explicitly do this for non-contig copies. // This mimics the behavior of cross-device cudaMemcpyAsync on // the default stream. // If the copy is not on the default stream, then it is up to the // user to add needed synchronization on the dst device, since the // stream on the dst device that wishes to synchronize may not be // the same index as the one on the src device. cudaStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev); if (srcDev != dstDev && copyStream == NULL) { // This is a cross-device copy on the default stream. We perform a // two-way barrier between both devices' default streams before // the copy. This ensures that any write-after-write and // write-after-read dependencies on the destination side are // handled, so that no one is operating on the dst memory when // we perform the copy. 
// src waits on dst barrier (src already waits on src) cudaEvent_t dstReady; THCudaCheck(cudaSetDevice(dstDev)); THCudaCheck(cudaEventCreateWithFlags(&dstReady, cudaEventDisableTiming)); THCudaCheck(cudaEventRecord(dstReady, NULL)); THCudaCheck(cudaSetDevice(srcDev)); THCudaCheck(cudaStreamWaitEvent(NULL, dstReady, 0)); THCudaCheck(cudaEventDestroy(dstReady)); } else if (srcDev != oldDev) { THCudaCheck(cudaSetDevice(srcDev)); } // We are now on srcDev if (memcpyEligible) { // Perform the copy THCudaCheck(cudaMemcpyAsync( dst->template data<ScalarTypeDst>(), src->template data<ScalarTypeSrc>(), totalElements * sizeof(ScalarTypeDst), cudaMemcpyDeviceToDevice, copyStream)); } else { // Non-contiguous copy or a type-conversion copy // We avoid creating temporary memory copies if possible. // If both src and dst are on the same device, or if they are on // different devices and p2p access is enabled, perform the copy // by a pointwise copy kernel. // Otherwise, we'll have to make contiguous (which will in fact // invoke copy() again), and then perform the copy. // FIXME: might want to consider only running the pointwise kernel // if both src and dst innermost dimensions are contiguous. If // they are not, then taking the hit of the memory allocation/free // might be worth it to avoid non-coalesced reads or writes. if (p2pEnabled) { bool succ = THC_pointwiseApply2<ScalarTypeDst, ScalarTypeSrc>( state, dst, src, CopyOp<ScalarTypeDst, ScalarTypeSrc>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } else { // GPUs can't access each other directly, but the tensors // involved are non-contiguous and/or are different types. // Make sure the src is contiguous and in the same type as dst THCudaCheck(cudaSetDevice(srcDev)); THCTensor* srcContig = NULL; if (sameType) { srcContig = THCTensor_newContiguous<ScalarTypeSrc>(state, src); } else { // Types are different // Copy into the new format, contiguous, on the source device srcContig = THCTensor_new(state, caffe2::TypeMeta::Make<ScalarTypeDst>()); THCTensor_resizeAs(state, srcContig, dst); bool succ = THC_pointwiseApply2<ScalarTypeDst, ScalarTypeSrc>( state, srcContig, src, CopyOp<ScalarTypeDst, ScalarTypeSrc>()); THArgCheck(succ, 2, CUTORCH_DIM_WARNING); } // Make sure the dst is contiguous THCudaCheck(cudaSetDevice(dstDev)); THCTensor* dstContig = THCTensor_newContiguous<ScalarTypeDst>(state, dst); // Now, we are ready for a cross-device memcpy of contiguous // data, of the same layout and type THCudaCheck(cudaSetDevice(srcDev)); THCudaCheck(cudaMemcpyAsync( dstContig->template data<ScalarTypeDst>(), srcContig->template data<ScalarTypeDst>(), totalElements * sizeof(ScalarTypeDst), cudaMemcpyDeviceToDevice, copyStream)); // We are done with the src THCTensor_free(state, srcContig); if (dst != dstContig) { THCTensor_freeCopyTo<ScalarTypeDst>(state, dstContig, dst); } else { THCTensor_free(state, dstContig); } // We're still on srcDev at this point } } if (srcDev != dstDev && copyStream == NULL) { // dst waits on src barrier (dst already waits on dst). We cannot // operate on dst's copy until the copy is complete. // Still on srcDev, record default stream event cudaEvent_t srcReady; THCudaCheck(cudaEventCreateWithFlags(&srcReady, cudaEventDisableTiming)); THCudaCheck(cudaEventRecord(srcReady, NULL)); THCudaCheck(cudaSetDevice(dstDev)); THCudaCheck(cudaStreamWaitEvent(NULL, srcReady, 0)); THCudaCheck(cudaEventDestroy(srcReady)); // We are now on dstDev (right above). 
// Restore prior device from dst if (dstDev != oldDev) { THCudaCheck(cudaSetDevice(oldDev)); } } else { // We are still on srcDev. Restore prior device from src if (srcDev != oldDev) { THCudaCheck(cudaSetDevice(oldDev)); } } THCudaCheck(cudaGetLastError()); } #include "generic/THCTensorCopy.cu" #include "THCGenerateAllTypes.h"
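For copies that change type or layout, the code above routes each element through THC_pointwiseApply2 with the CopyOp functor. Restricted to contiguous tensors, the effect of that path is a plain convert-and-store kernel like the sketch below; this is a simplified stand-in only (the kernel name, the grid sizing in the usage comment, and static_cast in place of ScalarConvert are assumptions, and half precision would still need the real conversion helpers):

#include <cstddef>

template <typename TypeDst, typename TypeSrc>
__global__ void convertCopyKernel (TypeDst *dst, const TypeSrc *src, ptrdiff_t n)
{
  ptrdiff_t i = blockIdx.x * (ptrdiff_t) blockDim.x + threadIdx.x;
  if (i < n) {
#if __CUDA_ARCH__ >= 350
    dst[i] = static_cast<TypeDst>(__ldg (&src[i]));   /* read-only cache load, as CopyOp does */
#else
    dst[i] = static_cast<TypeDst>(src[i]);
#endif
  }
}

/* e.g. convertCopyKernel<float, double><<<(n + 255) / 256, 256>>>(dstPtr, srcPtr, n); */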
e13616b591ea21d4e1bc6697b2328b1e2f464483.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <SFML/Graphics.hpp> #include <SFML/OpenGL.hpp> #include <SFML/Window.hpp> #include <winuser.h> #include <Windows.h> #include "geometry.cuh" #include "voxel_hip.cuh" #include <vector> #include <fstream> /* #ifdef _WIN32 #include <GL/gl.h> #include <GL/glu.h> #endif typedef union PixelInfo { std::uint32_t Colour; struct { std::uint8_t R, G, B, A; }; } *PPixelInfo; class Tga { private: std::vector<std::uint8_t> Pixels; bool ImageCompressed; std::uint32_t width, height, size, BitsPerPixel; public: Tga(const char* FilePath); std::vector<std::uint8_t> GetPixels() {return this->Pixels;} std::uint32_t GetWidth() const {return this->width;} std::uint32_t GetHeight() const {return this->height;} bool HasAlphaChannel() {return BitsPerPixel == 32;} }; Tga::Tga(const char* FilePath) { std::fstream hFile(FilePath, std::ios::in | std::ios::binary); if (!hFile.is_open()){throw std::invalid_argument("File Not Found.");} std::uint8_t Header[18] = {0}; std::vector<std::uint8_t> ImageData; static std::uint8_t DeCompressed[12] = {0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; static std::uint8_t IsCompressed[12] = {0x0, 0x0, 0xA, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; hFile.read(reinterpret_cast<char*>(&Header), sizeof(Header)); if (!std::memcmp(DeCompressed, &Header, sizeof(DeCompressed))) { BitsPerPixel = Header[16]; width = Header[13] * 256 + Header[12]; height = Header[15] * 256 + Header[14]; size = ((width * BitsPerPixel + 31) / 32) * 4 * height; if ((BitsPerPixel != 24) && (BitsPerPixel != 32)) { hFile.close(); throw std::invalid_argument("Invalid File Format. Required: 24 or 32 Bit Image."); } ImageData.resize(size); ImageCompressed = false; hFile.read(reinterpret_cast<char*>(ImageData.data()), size); } else if (!std::memcmp(IsCompressed, &Header, sizeof(IsCompressed))) { BitsPerPixel = Header[16]; width = Header[13] * 256 + Header[12]; height = Header[15] * 256 + Header[14]; size = ((width * BitsPerPixel + 31) / 32) * 4 * height; if ((BitsPerPixel != 24) && (BitsPerPixel != 32)) { hFile.close(); throw std::invalid_argument("Invalid File Format. Required: 24 or 32 Bit Image."); } PixelInfo Pixel = {0}; int CurrentByte = 0; std::size_t CurrentPixel = 0; ImageCompressed = true; std::uint8_t ChunkHeader = {0}; int BytesPerPixel = (BitsPerPixel / 8); ImageData.resize(width * height * sizeof(PixelInfo)); do { hFile.read(reinterpret_cast<char*>(&ChunkHeader), sizeof(ChunkHeader)); if(ChunkHeader < 128) { ++ChunkHeader; for(int I = 0; I < ChunkHeader; ++I, ++CurrentPixel) { hFile.read(reinterpret_cast<char*>(&Pixel), BytesPerPixel); ImageData[CurrentByte++] = Pixel.B; ImageData[CurrentByte++] = Pixel.G; ImageData[CurrentByte++] = Pixel.R; if (BitsPerPixel > 24) ImageData[CurrentByte++] = Pixel.A; } } else { ChunkHeader -= 127; hFile.read(reinterpret_cast<char*>(&Pixel), BytesPerPixel); for(int I = 0; I < ChunkHeader; ++I, ++CurrentPixel) { ImageData[CurrentByte++] = Pixel.B; ImageData[CurrentByte++] = Pixel.G; ImageData[CurrentByte++] = Pixel.R; if (BitsPerPixel > 24) ImageData[CurrentByte++] = Pixel.A; } } } while(CurrentPixel < (width * height)); } else { hFile.close(); throw std::invalid_argument("Invalid File Format. Required: 24 or 32 Bit TGA File."); } hFile.close(); this->Pixels = ImageData; } */ // TODO __host__ __device__ // TODO || (cuBLAS??) 
// TODO + // TODO // TODO cuda error #define M_PI 3.1415 #define MAP_SIZE 200 #define distToViewPort 1.0 #define Vh 1.0 #define Vw 1.0 #define imageHeight 1024 #define imageWidth 1024 #define crossSize 64 #define BOX_SIZE 4 #define MAX_FRAMES 1000 //#define DEBUG struct Camera { public: Camera() = default; double3 eyePosition; double angleX, angleY; double speed; Camera(double3 &eye, double xAng, double yAng) : angleX(xAng), angleY(yAng), eyePosition(eye), speed(0.3) {} }; struct Ray { public: double3 source; double3 direction; }; class Box { public: __host__ __device__ Box() = default; __host__ __device__ Box(int X, int Y, int Z) : x(X), y(Y), z(Z) { updateMinMax(); } __host__ __device__ bool intersect(const Ray &, double t0, double t1, double3 &inter1Point, double3 &inter2Point) const; double3 bounds[2]{}; __host__ __device__ void inc_x() { x++; updateMinMax(); } __host__ __device__ void inc_y() { y++; updateMinMax(); } __host__ __device__ void inc_z() { z++; updateMinMax(); } __host__ __device__ void dec_x() { x--; updateMinMax(); } __host__ __device__ void dec_y() { y--; updateMinMax(); } __host__ __device__ void dec_z() { z--; updateMinMax(); } __host__ __device__ int get_x() { return x; } __host__ __device__ int get_y() { return y; } __host__ __device__ int get_z() { return z; } private: int x, y, z; __host__ __device__ void updateMinMax() { bounds[0] = make_double3(x * BOX_SIZE, y * BOX_SIZE, z * BOX_SIZE); bounds[1] = bounds[0] + make_double3(BOX_SIZE, BOX_SIZE, BOX_SIZE); } }; // , interPoint __host__ __device__ bool Box::intersect(const Ray &r, double t0, double t1, double3 &inter1Point, double3 &inter2Point) const { double tmin, tmax, tymin, tymax, tzmin, tzmax; if (r.direction.x >= 0) { tmin = (bounds[0].x - r.source.x) / r.direction.x; tmax = (bounds[1].x - r.source.x) / r.direction.x; } else { tmin = (bounds[1].x - r.source.x) / r.direction.x; tmax = (bounds[0].x - r.source.x) / r.direction.x; } if (r.direction.y >= 0) { tymin = (bounds[0].y - r.source.y) / r.direction.y; tymax = (bounds[1].y - r.source.y) / r.direction.y; } else { tymin = (bounds[1].y - r.source.y) / r.direction.y; tymax = (bounds[0].y - r.source.y) / r.direction.y; } if ((tmin > tymax) || (tymin > tmax)) return false; if (tymin > tmin) tmin = tymin; if (tymax < tmax) tmax = tymax; if (r.direction.z >= 0) { tzmin = (bounds[0].z - r.source.z) / r.direction.z; tzmax = (bounds[1].z - r.source.z) / r.direction.z; } else { tzmin = (bounds[1].z - r.source.z) / r.direction.z; tzmax = (bounds[0].z - r.source.z) / r.direction.z; } if ((tmin > tzmax) || (tzmin > tmax)) return false; if (tzmin > tmin) tmin = tzmin; if (tzmax < tmax) tmax = tzmax; if (((tmin < t1) && (tmax > t0))) { inter1Point = r.source + r.direction * tmin; inter2Point = r.source + r.direction * tmax; return true; } return false; } __host__ __device__ void buildDirectionOfStraightMove(Camera *cam, double3 *move) { double3 origin = make_double3(0., 0., 1.); double3 xRotated = MakeRotationY((cam->angleX) * M_PI / 180.) * make_double3(1.0, 0.0, 0.0); double3 result = MakeRotationY((cam->angleX) * M_PI / 180.) * origin; result = MakeRotation(-cam->angleY * M_PI / 180., xRotated) * result; *move = result; } __host__ __device__ void buildDirectionOfNotSoStraightMove(Camera *cam, double3 *move) { double3 origin = make_double3(0., 0., 1.); double3 xRotated = MakeRotationY((cam->angleX) * M_PI / 180.) * make_double3(1.0, 0.0, 0.0); double3 result = MakeRotationY((cam->angleX) * M_PI / 180.) 
* origin; result = MakeRotation(-cam->angleY * M_PI / 180., xRotated) * result; double3 viewDirection = result; double3 y = make_double3(0., 1., 0.); *move = Cross(viewDirection, y); } __host__ __device__ Ray computePrimRay(Camera *cam, const int i, const int j) { double3 projectPlaneOrigin = make_double3(0, 0, distToViewPort); double3 rightArrow = make_double3(Vw, 0, 00), downArrow = make_double3(0.0, Vh, 0.0); double3 leftTopCorner = projectPlaneOrigin - rightArrow / 2 - downArrow / 2; double3 hitDotOnProjectPlane = leftTopCorner + rightArrow * (static_cast<double>(j) / imageWidth) + downArrow * (static_cast<double>(i) / imageHeight); double3 xRotated = MakeRotationY((cam->angleX) * M_PI / 180.) * make_double3(1.0, 0.0, 0.0); hitDotOnProjectPlane = MakeRotationY((cam->angleX) * M_PI / 180.) * hitDotOnProjectPlane; hitDotOnProjectPlane = MakeRotation(-cam->angleY * M_PI / 180., xRotated) * hitDotOnProjectPlane; hitDotOnProjectPlane = hitDotOnProjectPlane + cam->eyePosition; double3 dir = hitDotOnProjectPlane - cam->eyePosition; return {cam->eyePosition, dir}; } __host__ __device__ bool checkWorld(Box *box, voxel* world) { if (box->get_x() < 0 || box->get_y() < 0 || box->get_z() < 0 || box->get_x() >= MAP_SIZE || box->get_y() >= MAP_SIZE || box->get_z() >= MAP_SIZE) return 0; return world[box->get_x() * MAP_SIZE * MAP_SIZE + box->get_y() * MAP_SIZE + box->get_z()].isActive(); } /* (x, y, z) - world */ __device__ double3 traverseRay(int startX, int startY, int startZ, Ray &ray, int deep, voxel* world, Box *lastBox) { Box currentBox = Box(startX, startY, startZ); ray.direction = Normalize(&ray.direction); double3 deltaT; double t_x = ((BOX_SIZE - ray.source.x) / ray.direction.x), t_y = ((BOX_SIZE - ray.source.y) / ray.direction.y), t_z = ((BOX_SIZE - ray.source.z) / ray.direction.z) ; if (ray.direction.x < 0) { deltaT.x = -BOX_SIZE / ray.direction.x; t_x = (floor(ray.source.x / BOX_SIZE) * BOX_SIZE - ray.source.x) / ray.direction.x; } else { deltaT.x = BOX_SIZE / ray.direction.x; t_x = ((floor(ray.source.x / BOX_SIZE) + 1) * BOX_SIZE - ray.source.x) / ray.direction.x; } if (ray.direction.y < 0) { deltaT.y = -BOX_SIZE / ray.direction.y; t_y = (floor(ray.source.y / BOX_SIZE) * BOX_SIZE - ray.source.y) / ray.direction.y; } else { deltaT.y = BOX_SIZE / ray.direction.y; t_y = ((floor(ray.source.y / BOX_SIZE) + 1) * BOX_SIZE - ray.source.y) / ray.direction.y; } if (ray.direction.z < 0) { deltaT.z = -BOX_SIZE / ray.direction.z; t_z = (floor(ray.source.z / BOX_SIZE) * BOX_SIZE - ray.source.z) / ray.direction.z; } else { deltaT.z = BOX_SIZE / ray.direction.z; t_z = ((floor(ray.source.z / BOX_SIZE) + 1) * BOX_SIZE - ray.source.z) / ray.direction.z; } while (true) { if (currentBox.get_x() < 0 || currentBox.get_y() < 0 || currentBox.get_z() < 0 || currentBox.get_x() >= MAP_SIZE || currentBox.get_y() >= MAP_SIZE || currentBox.get_z() >= MAP_SIZE /*|| deep > MAP_SIZE * 2*/) { *lastBox = Box(-1, -1, -1); return make_double3(-1., -1., -1.); } double t = 0.; if (t_x < t_y) { if (t_x < t_z) { t = t_x; t_x += deltaT.x; // increment, next crossing along x if(ray.direction.x < 0) currentBox.dec_x(); else currentBox.inc_x(); } else { t = t_z; t_z += deltaT.z; // increment, next crossing along x if(ray.direction.z < 0) currentBox.dec_z(); else currentBox.inc_z(); } } else { if (t_y < t_z) { t = t_y; t_y += deltaT.y; // increment, next crossing along x if(ray.direction.y < 0) currentBox.dec_y(); else currentBox.inc_y(); } else { t = t_z; t_z += deltaT.z; // increment, next crossing along x if(ray.direction.z < 0) 
currentBox.dec_z(); else currentBox.inc_z(); } } if (checkWorld(&currentBox, world)) { *lastBox = currentBox; return ray.source + ray.direction * t; } deep++; } } /* _ */ __host__ __device__ bool hitRay(int startX, int startY, int startZ, Ray &ray, int deep, Box &boxToDelete, Box &boxToAdd, voxel* world) { const double eps = 0.000001; Box currentBox = Box(startX, startY, startZ); while (true) { if (currentBox.get_x() < 0 || currentBox.get_y() < 0 || currentBox.get_z() < 0 || currentBox.get_x() >= MAP_SIZE || currentBox.get_y() >= MAP_SIZE || currentBox.get_z() >= MAP_SIZE || deep > 150) return false; /* A1 < A2 : */ double3 A1 = double3(), A2 = double3(); if (currentBox.intersect(ray, 0, INFINITY, A1, A2)) { boxToAdd = currentBox; double3 A2_normalized = A2 - currentBox.bounds[0]; if (abs(A2_normalized.x) < eps) currentBox.dec_x(); if (abs(A2_normalized.y) < eps) currentBox.dec_y(); if (abs(A2_normalized.z) < eps) currentBox.dec_z(); if (abs(A2_normalized.x - BOX_SIZE) < eps) currentBox.inc_x(); if (abs(A2_normalized.y - BOX_SIZE) < eps) currentBox.inc_y(); if (abs(A2_normalized.z - BOX_SIZE) < eps) currentBox.inc_z(); if (checkWorld(&currentBox, world)) { boxToDelete = currentBox; return true; } } deep++; } } // TODO ? __host__ __device__ void deleteVoxel(Camera *cam, voxel* world) { Ray hit = computePrimRay(cam, imageWidth / 2, imageHeight / 2); Box boxToDelete = Box(0, 0, 0), boxToAdd = Box(0, 0, 0); if (hitRay(static_cast<int>(cam->eyePosition.x / BOX_SIZE), static_cast<int>(cam->eyePosition.y / BOX_SIZE), static_cast<int>(cam->eyePosition.z / BOX_SIZE), hit, 5, boxToDelete, boxToAdd, world)) { int dx[] = { 1, 0, -1, 0, 0, 0}; int dy[] = { 0, 1, 0, -1, 0, 0 }; int dz[] = { 0, 0, 0, 0, 1, -1 }; for (int i = 0; i < 6; i++) { world[(boxToDelete.get_x() + dx[i]) * MAP_SIZE * MAP_SIZE + (boxToDelete.get_y() + dy[i]) * MAP_SIZE + boxToDelete.get_z() + dz[i]].setInactive(); } world[boxToDelete.get_x() * MAP_SIZE * MAP_SIZE + boxToDelete.get_y() * MAP_SIZE + boxToDelete.get_z()].setInactive(); } } __host__ __device__ void addVoxel(Camera *cam, voxel* world) { Ray hit = computePrimRay(cam, imageWidth / 2, imageHeight / 2); Box boxToAdd = Box(0, 0, 0), boxToDelete = Box(0, 0, 0); if (hitRay(static_cast<int>(cam->eyePosition.x / BOX_SIZE), static_cast<int>(cam->eyePosition.y / BOX_SIZE), static_cast<int>(cam->eyePosition.z / BOX_SIZE), hit, 5, boxToDelete, boxToAdd, world)) { world[boxToAdd.get_x() * MAP_SIZE * MAP_SIZE + boxToAdd.get_y() * MAP_SIZE + boxToAdd.get_z()].setActive(); } } __global__ void traversePixels(uint3 *screen, Camera *cam, voxel* world, double3 *lightSource) { __shared__ uint3 temp[512]; __shared__ double3 firstHitDots[512]; __shared__ Camera sharedCam; __shared__ double3 firstHitDotsNormalized[512]; sharedCam = *cam; double eps = 0.0000001; // int idx = blockIdx.x * blockDim.x + threadIdx.x; Box currBox = Box(); uchar3 color; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int idx = j + i * blockDim.x * gridDim.x; int linearThreadIdxInBlock = threadIdx.x + threadIdx.y * 16; Ray primRay = computePrimRay(cam, i, j); firstHitDots[linearThreadIdxInBlock] = traverseRay((static_cast<int>(sharedCam.eyePosition.x / BOX_SIZE)), (static_cast<int>(sharedCam.eyePosition.y / BOX_SIZE)), (static_cast<int>(sharedCam.eyePosition.z / BOX_SIZE)), primRay, 0, world, &currBox); double3 emptyConst = make_double3(-1., -1., -1.); if (firstHitDots[linearThreadIdxInBlock] == emptyConst) { /** = */ color = make_uchar3(21, 4, 133); } else if 
(checkWorld(&currBox, world) == 1) { __syncthreads(); int3 coordinatesOfVoxel = make_int3(currBox.get_x(), currBox.get_y(), currBox.get_z()); color = world[coordinatesOfVoxel.x * MAP_SIZE * MAP_SIZE + coordinatesOfVoxel.y * MAP_SIZE + coordinatesOfVoxel.z].color; double3 dir = firstHitDots[linearThreadIdxInBlock] - *lightSource; Ray shadowRay; shadowRay.source = *lightSource; shadowRay.direction = dir; double3 lastLightHit = traverseRay((static_cast<int>(lightSource->x / BOX_SIZE)), (static_cast<int>(lightSource->y / BOX_SIZE)), (static_cast<int>(lightSource->z / BOX_SIZE)), shadowRay, 0, world, &currBox); hipDeviceSynchronize(); /*if (firstHitDots[linearThreadIdxInBlock].y / BOX_SIZE == MAP_SIZE - 10) color = make_uint3(198, 42, 136);*/ //hipDeviceSynchronize(); if (!(lastLightHit == firstHitDots[linearThreadIdxInBlock])) { /** */ color = color * 0.2; } else { /** */ /** firstHitDot */ firstHitDotsNormalized[linearThreadIdxInBlock] = firstHitDots[linearThreadIdxInBlock] - make_double3(round(firstHitDots[linearThreadIdxInBlock].x / BOX_SIZE.) * BOX_SIZE, round(firstHitDots[linearThreadIdxInBlock].y / BOX_SIZE.) * BOX_SIZE, round(firstHitDots[linearThreadIdxInBlock].z / BOX_SIZE.) * BOX_SIZE); double3 normal = make_double3(0., 0., 0.); //hipDeviceSynchronize(); if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].x) < eps) normal.x = -1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].x - BOX_SIZE) < eps) normal.x = +1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].y) < eps) normal.y = -1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].y - BOX_SIZE) < eps) normal.y = +1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].z) < eps) normal.z = -1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].z - BOX_SIZE) < eps) normal.z = +1.; double lightIntensity = 0.2; double cosx = Dot(normal, shadowRay.direction * -1.) / Magnitude(normal) / Magnitude(shadowRay.direction); double diffuser = (Magnitude(firstHitDots[linearThreadIdxInBlock] - *lightSource)); //cosx = 130000 * cosx / (diffuser * diffuser); if (cosx >= eps) lightIntensity += cosx; if (lightIntensity > 1.) lightIntensity = 1.0; color = color * lightIntensity; //hipDeviceSynchronize(); } } else { /** */ color = make_uchar3(255, 255, 255); } temp[linearThreadIdxInBlock].x = (static_cast<unsigned char>(color.x)); temp[linearThreadIdxInBlock].y = (static_cast<unsigned char>(color.y)); temp[linearThreadIdxInBlock].z = (static_cast<unsigned char>(color.z)); __syncthreads(); screen[idx].x = temp[linearThreadIdxInBlock].x; screen[idx].y = temp[linearThreadIdxInBlock].y; screen[idx].z = temp[linearThreadIdxInBlock].z; } /* std::chrono::milliseconds start_time; __host__ __device__ void generateMap(unsigned int *world) { static double t1 = 0.001, t2 = 0.001; std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::system_clock::now().time_since_epoch() ) - start_time; double delta = M_PI; int x1 = static_cast<int>(10*cos(t1)) + MAP_SIZE / 2; int z1 = static_cast<int>(10*sin(t1)) + MAP_SIZE / 2; int y1 = static_cast<int>(t1*3.) + BOX_SIZE * 2; int x2 = static_cast<int>(10*cos(-t2-delta)) + MAP_SIZE / 2; int z2 = static_cast<int>(10*sin(-t2-delta)) + MAP_SIZE / 2; int y2 = static_cast<int>(t2*5.) 
+ BOX_SIZE * 2; if((x1 >= MAP_SIZE || y1 >= MAP_SIZE || z1 >= MAP_SIZE) || (x2 >= MAP_SIZE || y2 >= MAP_SIZE || z2 >= MAP_SIZE)) return; int idx = x1 * MAP_SIZE * MAP_SIZE + y1 * MAP_SIZE + z1; world[idx] = 1; idx = x2 * MAP_SIZE * MAP_SIZE + y2 * MAP_SIZE + z2; if(t1 > delta) { world[idx] = 1; t2 += 0.05; } t1 += 0.05; //for (int i = 0; i < MAP_SIZE * MAP_SIZE * MAP_SIZE; i++) // world[i] = (rand() % 1000 == 0); } */ bool bounds(double3 pos) { if (pos.x >= (MAP_SIZE - 1) * BOX_SIZE || pos.y >= (MAP_SIZE - 1) * BOX_SIZE || pos.z >= ((MAP_SIZE - 1) * BOX_SIZE) || pos.x <= 0 || pos.y <= 0 || pos.z <= 0) return false; return true; } void printDebug(Camera *cam) { printf("angleX: %lf\n", cam->angleX); printf("angleY: %lf\n", cam->angleY); printf("eyePosition: (%lf, %lf, %lf)\n", cam->eyePosition.x, cam->eyePosition.y, cam->eyePosition.z); } int main() { /* Tga info = Tga("C:/Users/...../Desktop/SomeTGA.tga"); GLuint texture = 0; glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); glTexImage2D(GL_TEXTURE_2D, 0, info.HasAlphaChannel() ? GL_RGBA : GL_RGB, info.GetWidth(), info.GetWidth(), 0, info.HasAlphaChannel() ? GL_RGBA : GL_RGB, GL_UNSIGNED_BYTE, info.GetPixels().data()); */ int frames = 0; float sumScreenRenderTime = 0.0; voxel *world; Camera *cam; uint3 *screen; double3* light; if (hipMallocManaged(&world, MAP_SIZE * MAP_SIZE * MAP_SIZE * sizeof(world[0]))) fprintf(stderr, "cuda malloc error: world"); if (hipMallocManaged(&screen, imageHeight * imageWidth * sizeof(uint3))) fprintf(stderr, "cuda malloc error: screen"); if (hipMallocManaged(&cam, sizeof(Camera))) fprintf(stderr, "cuda malloc error: camera"); if (hipMallocManaged(&light, sizeof(double3))) fprintf(stderr, "cuda malloc error: light"); uint3 localLight = make_uint3(MAP_SIZE / 2, 15, MAP_SIZE / 2); auto *hostScreen = static_cast<uint3 *>(malloc(imageHeight * imageWidth * sizeof(uint3))); int blocksCnt = 0; for (int i = 0; i < MAP_SIZE * MAP_SIZE * MAP_SIZE; i++) { int x, y, z; x = i / MAP_SIZE / MAP_SIZE; y = i / MAP_SIZE % MAP_SIZE; z = i % MAP_SIZE; int R = 35; if ((x - MAP_SIZE / 2) * (x - MAP_SIZE / 2) + (y - (MAP_SIZE - 2 * R)) * (y - (MAP_SIZE - 2 * R)) + (z - MAP_SIZE / 2) * (z - MAP_SIZE / 2) <= R * R) { world[i].setActive(); world[i].setColor(rand()%256, rand()%256, rand()%256); } if (y == MAP_SIZE - 10) { world[i].setActive(); world[i].setColor(0, 255, 0); } blocksCnt += world[i].isActive(); } hipDeviceSynchronize(); printf("Num of voxels: %d\n", blocksCnt); double3 eyePosition = make_double3(64.636510, 1.0, 294.136342); cam->eyePosition = eyePosition; cam->angleX = 234.833333; cam->angleY = -28.666667; cam->speed = 5.0; sf::Color backgroundColor = sf::Color::Black; sf::RenderWindow window(sf::VideoMode(imageHeight, imageWidth), "lol"); sf::Image image; image.create(imageHeight, imageWidth, sf::Color::Magenta); bool drawCross = true; sf::Texture crossTexture; if (!crossTexture.loadFromFile("cross.png", sf::IntRect(0, 0, crossSize, crossSize))) { fprintf(stderr, "Error loading cross.jpg\n"); } // TODO double3 *moveStraight; hipMallocManaged(&moveStraight, sizeof(double3)); double3 *moveNotStraight; hipMallocManaged(&moveNotStraight, sizeof(double3)); double t = 0.0; localLight.x = static_cast<int>(10 * cos(t)) + MAP_SIZE / 2; localLight.y = static_cast<int>(10 * sin(t)) + MAP_SIZE / 2; localLight.z = 10; while (window.isOpen()) { world[localLight.x * MAP_SIZE * MAP_SIZE + localLight.y * MAP_SIZE + localLight.z].setInactive(); localLight.x = static_cast<int>(40 * cos(t)) + MAP_SIZE / 2; localLight.y = 
static_cast<int>(40 * sin(t)) + MAP_SIZE / 2; localLight.z = 10; t += 0.05; light->x = localLight.x * BOX_SIZE + BOX_SIZE / 2.; light->y = localLight.y * BOX_SIZE + BOX_SIZE / 2.; light->z = localLight.z * BOX_SIZE + BOX_SIZE / 2.; world[localLight.x * MAP_SIZE * MAP_SIZE + localLight.y * MAP_SIZE + localLight.z].setLight(); SetCursorPos(window.getPosition().x + imageWidth / 2, window.getPosition().y + imageHeight / 2); window.sf::Window::setMouseCursorVisible(false); dim3 threads(16,16); dim3 blocks(imageWidth/threads.x,imageHeight/threads.y); hipLaunchKernelGGL(( traversePixels), dim3(blocks), dim3(threads), 0, 0, screen, cam, world, light); //hipDeviceSynchronize(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMemcpy(hostScreen, screen, imageHeight * imageWidth * sizeof(uint3), hipMemcpyDeviceToHost); for (int i = 0; i < imageHeight; i++) { for (int j = 0; j < imageWidth; j++) { image.setPixel(j, i, sf::Color(hostScreen[i * imageWidth + j].x, hostScreen[i * imageWidth + j].y, hostScreen[i * imageWidth + j].z)); } } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); sumScreenRenderTime += milliseconds; sf::Event event{}; while (window.pollEvent(event)) { if (event.type == sf::Event::Closed) window.close(); if (event.type == sf::Event::KeyPressed) { if (event.key.code == sf::Keyboard::Escape) window.close(); if (event.key.code == sf::Keyboard::V) drawCross = !drawCross; if (event.key.code == sf::Keyboard::I) printDebug(cam); if (event.key.code == sf::Keyboard::Up) { cam->speed++; } if (event.key.code == sf::Keyboard::Down) { cam->speed--; } } /*if (event.type == sf::Event::MouseButtonPressed){ if (event.mouseButton.button == sf::Mouse::Left) deleteVoxel(cam, world); if (event.mouseButton.button == sf::Mouse::Right) addVoxel(cam, world); }*/ } sf::Sprite crossSprite; crossSprite.setTexture(crossTexture); crossSprite.setPosition(imageWidth * 4 / 2. - crossSize / 2., imageHeight * 4 / 2. - crossSize / 2.); sf::Texture pixelsTexture; pixelsTexture.loadFromImage(image); sf::Sprite pixels; pixels.setTexture(pixelsTexture, true); if (sf::Mouse::isButtonPressed(sf::Mouse::Left)) { deleteVoxel(cam, world); } if (sf::Mouse::isButtonPressed(sf::Mouse::Right)) { addVoxel(cam, world); } POINT mousexy; GetCursorPos(&mousexy); int xt = window.getPosition().x + imageWidth / 2; int yt = window.getPosition().y + imageHeight / 2; cam->angleX += (xt - mousexy.x) / 6.; cam->angleY += (yt - mousexy.y) / 6.; SetCursorPos(xt, yt); if (cam->angleY > 89.) cam->angleY = 89.; if (cam->angleY < -89.) 
cam->angleY = -89.; buildDirectionOfStraightMove(cam, moveStraight); buildDirectionOfNotSoStraightMove(cam, moveNotStraight); if (sf::Keyboard::isKeyPressed(sf::Keyboard::W)) if (bounds(cam->eyePosition + Normalize(moveStraight) * cam->speed)) cam->eyePosition = cam->eyePosition + Normalize(moveStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::S)) if (bounds(cam->eyePosition - Normalize(moveStraight) * cam->speed)) cam->eyePosition = cam->eyePosition - Normalize(moveStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::A)) if (bounds(cam->eyePosition + Normalize(moveNotStraight) * cam->speed)) cam->eyePosition = cam->eyePosition + Normalize(moveNotStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::D)) if (bounds(cam->eyePosition - Normalize(moveNotStraight) * cam->speed)) cam->eyePosition = cam->eyePosition - Normalize(moveNotStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::LShift)) if (bounds(cam->eyePosition + make_double3(0, cam->speed, 0))) cam->eyePosition.y += cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::Space)) if (bounds(cam->eyePosition - make_double3(0, cam->speed, 0))) cam->eyePosition.y -= cam->speed; window.clear(sf::Color::Magenta); window.draw(pixels); if (drawCross) window.draw(crossSprite); window.display(); } hipFree(world); hipFree(screen); hipFree(cam); std::cout << "frames: " << frames-1 << std::endl; std::cout << "sumScreenRenderTime: " << sumScreenRenderTime << std::endl; return 0; }
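Throughout the renderer above, the voxel grid is addressed with the flat index x * MAP_SIZE * MAP_SIZE + y * MAP_SIZE + z, and world-space positions are mapped to grid coordinates by dividing by BOX_SIZE. A minimal sketch of that convention as two helpers (illustrative only; the helper names are not in the original file, and MAP_SIZE, BOX_SIZE and make_int3 come from the surrounding code):

__host__ __device__ inline int voxelIndex(int x, int y, int z) {
    // x varies slowest and z fastest, matching main(): x = i / MAP_SIZE / MAP_SIZE,
    // y = i / MAP_SIZE % MAP_SIZE, z = i % MAP_SIZE.
    return x * MAP_SIZE * MAP_SIZE + y * MAP_SIZE + z;
}

__host__ __device__ inline int3 worldToVoxel(double3 p) {
    // Each voxel spans BOX_SIZE world units per axis, so truncating the division
    // recovers the integer grid coordinate that checkWorld()/traverseRay() expect.
    return make_int3(static_cast<int>(p.x / BOX_SIZE),
                     static_cast<int>(p.y / BOX_SIZE),
                     static_cast<int>(p.z / BOX_SIZE));
}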
e13616b591ea21d4e1bc6697b2328b1e2f464483.cu
#include <iostream> #include <SFML/Graphics.hpp> #include <SFML/OpenGL.hpp> #include <SFML/Window.hpp> #include <winuser.h> #include <Windows.h> #include "geometry.cuh" #include "voxel.cuh" #include <vector> #include <fstream> /* #ifdef _WIN32 #include <GL/gl.h> #include <GL/glu.h> #endif typedef union PixelInfo { std::uint32_t Colour; struct { std::uint8_t R, G, B, A; }; } *PPixelInfo; class Tga { private: std::vector<std::uint8_t> Pixels; bool ImageCompressed; std::uint32_t width, height, size, BitsPerPixel; public: Tga(const char* FilePath); std::vector<std::uint8_t> GetPixels() {return this->Pixels;} std::uint32_t GetWidth() const {return this->width;} std::uint32_t GetHeight() const {return this->height;} bool HasAlphaChannel() {return BitsPerPixel == 32;} }; Tga::Tga(const char* FilePath) { std::fstream hFile(FilePath, std::ios::in | std::ios::binary); if (!hFile.is_open()){throw std::invalid_argument("File Not Found.");} std::uint8_t Header[18] = {0}; std::vector<std::uint8_t> ImageData; static std::uint8_t DeCompressed[12] = {0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; static std::uint8_t IsCompressed[12] = {0x0, 0x0, 0xA, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; hFile.read(reinterpret_cast<char*>(&Header), sizeof(Header)); if (!std::memcmp(DeCompressed, &Header, sizeof(DeCompressed))) { BitsPerPixel = Header[16]; width = Header[13] * 256 + Header[12]; height = Header[15] * 256 + Header[14]; size = ((width * BitsPerPixel + 31) / 32) * 4 * height; if ((BitsPerPixel != 24) && (BitsPerPixel != 32)) { hFile.close(); throw std::invalid_argument("Invalid File Format. Required: 24 or 32 Bit Image."); } ImageData.resize(size); ImageCompressed = false; hFile.read(reinterpret_cast<char*>(ImageData.data()), size); } else if (!std::memcmp(IsCompressed, &Header, sizeof(IsCompressed))) { BitsPerPixel = Header[16]; width = Header[13] * 256 + Header[12]; height = Header[15] * 256 + Header[14]; size = ((width * BitsPerPixel + 31) / 32) * 4 * height; if ((BitsPerPixel != 24) && (BitsPerPixel != 32)) { hFile.close(); throw std::invalid_argument("Invalid File Format. Required: 24 or 32 Bit Image."); } PixelInfo Pixel = {0}; int CurrentByte = 0; std::size_t CurrentPixel = 0; ImageCompressed = true; std::uint8_t ChunkHeader = {0}; int BytesPerPixel = (BitsPerPixel / 8); ImageData.resize(width * height * sizeof(PixelInfo)); do { hFile.read(reinterpret_cast<char*>(&ChunkHeader), sizeof(ChunkHeader)); if(ChunkHeader < 128) { ++ChunkHeader; for(int I = 0; I < ChunkHeader; ++I, ++CurrentPixel) { hFile.read(reinterpret_cast<char*>(&Pixel), BytesPerPixel); ImageData[CurrentByte++] = Pixel.B; ImageData[CurrentByte++] = Pixel.G; ImageData[CurrentByte++] = Pixel.R; if (BitsPerPixel > 24) ImageData[CurrentByte++] = Pixel.A; } } else { ChunkHeader -= 127; hFile.read(reinterpret_cast<char*>(&Pixel), BytesPerPixel); for(int I = 0; I < ChunkHeader; ++I, ++CurrentPixel) { ImageData[CurrentByte++] = Pixel.B; ImageData[CurrentByte++] = Pixel.G; ImageData[CurrentByte++] = Pixel.R; if (BitsPerPixel > 24) ImageData[CurrentByte++] = Pixel.A; } } } while(CurrentPixel < (width * height)); } else { hFile.close(); throw std::invalid_argument("Invalid File Format. Required: 24 or 32 Bit TGA File."); } hFile.close(); this->Pixels = ImageData; } */ // TODO нормальные __host__ __device__ // TODO сделать класс углов || взять из готовой (cuBLAS??) 
// TODO текстуры + шрифт // TODO нормальная либа векторов // TODO переписать сообщения об ошибках на cuda error #define M_PI 3.1415 #define MAP_SIZE 200 #define distToViewPort 1.0 #define Vh 1.0 #define Vw 1.0 #define imageHeight 1024 #define imageWidth 1024 #define crossSize 64 #define BOX_SIZE 4 #define MAX_FRAMES 1000 //#define DEBUG struct Camera { public: Camera() = default; double3 eyePosition; double angleX, angleY; double speed; Camera(double3 &eye, double xAng, double yAng) : angleX(xAng), angleY(yAng), eyePosition(eye), speed(0.3) {} }; struct Ray { public: double3 source; double3 direction; }; class Box { public: __host__ __device__ Box() = default; __host__ __device__ Box(int X, int Y, int Z) : x(X), y(Y), z(Z) { updateMinMax(); } __host__ __device__ bool intersect(const Ray &, double t0, double t1, double3 &inter1Point, double3 &inter2Point) const; double3 bounds[2]{}; __host__ __device__ void inc_x() { x++; updateMinMax(); } __host__ __device__ void inc_y() { y++; updateMinMax(); } __host__ __device__ void inc_z() { z++; updateMinMax(); } __host__ __device__ void dec_x() { x--; updateMinMax(); } __host__ __device__ void dec_y() { y--; updateMinMax(); } __host__ __device__ void dec_z() { z--; updateMinMax(); } __host__ __device__ int get_x() { return x; } __host__ __device__ int get_y() { return y; } __host__ __device__ int get_z() { return z; } private: int x, y, z; __host__ __device__ void updateMinMax() { bounds[0] = make_double3(x * BOX_SIZE, y * BOX_SIZE, z * BOX_SIZE); bounds[1] = bounds[0] + make_double3(BOX_SIZE, BOX_SIZE, BOX_SIZE); } }; // ищет пересечение луча и коробки, записывает точки в interPoint __host__ __device__ bool Box::intersect(const Ray &r, double t0, double t1, double3 &inter1Point, double3 &inter2Point) const { double tmin, tmax, tymin, tymax, tzmin, tzmax; if (r.direction.x >= 0) { tmin = (bounds[0].x - r.source.x) / r.direction.x; tmax = (bounds[1].x - r.source.x) / r.direction.x; } else { tmin = (bounds[1].x - r.source.x) / r.direction.x; tmax = (bounds[0].x - r.source.x) / r.direction.x; } if (r.direction.y >= 0) { tymin = (bounds[0].y - r.source.y) / r.direction.y; tymax = (bounds[1].y - r.source.y) / r.direction.y; } else { tymin = (bounds[1].y - r.source.y) / r.direction.y; tymax = (bounds[0].y - r.source.y) / r.direction.y; } if ((tmin > tymax) || (tymin > tmax)) return false; if (tymin > tmin) tmin = tymin; if (tymax < tmax) tmax = tymax; if (r.direction.z >= 0) { tzmin = (bounds[0].z - r.source.z) / r.direction.z; tzmax = (bounds[1].z - r.source.z) / r.direction.z; } else { tzmin = (bounds[1].z - r.source.z) / r.direction.z; tzmax = (bounds[0].z - r.source.z) / r.direction.z; } if ((tmin > tzmax) || (tzmin > tmax)) return false; if (tzmin > tmin) tmin = tzmin; if (tzmax < tmax) tmax = tzmax; if (((tmin < t1) && (tmax > t0))) { inter1Point = r.source + r.direction * tmin; inter2Point = r.source + r.direction * tmax; return true; } return false; } __host__ __device__ void buildDirectionOfStraightMove(Camera *cam, double3 *move) { double3 origin = make_double3(0., 0., 1.); double3 xRotated = MakeRotationY((cam->angleX) * M_PI / 180.) * make_double3(1.0, 0.0, 0.0); double3 result = MakeRotationY((cam->angleX) * M_PI / 180.) * origin; result = MakeRotation(-cam->angleY * M_PI / 180., xRotated) * result; *move = result; } __host__ __device__ void buildDirectionOfNotSoStraightMove(Camera *cam, double3 *move) { double3 origin = make_double3(0., 0., 1.); double3 xRotated = MakeRotationY((cam->angleX) * M_PI / 180.) 
* make_double3(1.0, 0.0, 0.0); double3 result = MakeRotationY((cam->angleX) * M_PI / 180.) * origin; result = MakeRotation(-cam->angleY * M_PI / 180., xRotated) * result; double3 viewDirection = result; double3 y = make_double3(0., 1., 0.); *move = Cross(viewDirection, y); } __host__ __device__ Ray computePrimRay(Camera *cam, const int i, const int j) { double3 projectPlaneOrigin = make_double3(0, 0, distToViewPort); double3 rightArrow = make_double3(Vw, 0, 00), downArrow = make_double3(0.0, Vh, 0.0); double3 leftTopCorner = projectPlaneOrigin - rightArrow / 2 - downArrow / 2; double3 hitDotOnProjectPlane = leftTopCorner + rightArrow * (static_cast<double>(j) / imageWidth) + downArrow * (static_cast<double>(i) / imageHeight); double3 xRotated = MakeRotationY((cam->angleX) * M_PI / 180.) * make_double3(1.0, 0.0, 0.0); hitDotOnProjectPlane = MakeRotationY((cam->angleX) * M_PI / 180.) * hitDotOnProjectPlane; hitDotOnProjectPlane = MakeRotation(-cam->angleY * M_PI / 180., xRotated) * hitDotOnProjectPlane; hitDotOnProjectPlane = hitDotOnProjectPlane + cam->eyePosition; double3 dir = hitDotOnProjectPlane - cam->eyePosition; return {cam->eyePosition, dir}; } __host__ __device__ bool checkWorld(Box *box, voxel* world) { if (box->get_x() < 0 || box->get_y() < 0 || box->get_z() < 0 || box->get_x() >= MAP_SIZE || box->get_y() >= MAP_SIZE || box->get_z() >= MAP_SIZE) return 0; return world[box->get_x() * MAP_SIZE * MAP_SIZE + box->get_y() * MAP_SIZE + box->get_z()].isActive(); } /* (x, y, z) - индексы текущего бокса в world */ __device__ double3 traverseRay(int startX, int startY, int startZ, Ray &ray, int deep, voxel* world, Box *lastBox) { Box currentBox = Box(startX, startY, startZ); ray.direction = Normalize(&ray.direction); double3 deltaT; double t_x = ((BOX_SIZE - ray.source.x) / ray.direction.x), t_y = ((BOX_SIZE - ray.source.y) / ray.direction.y), t_z = ((BOX_SIZE - ray.source.z) / ray.direction.z) ; if (ray.direction.x < 0) { deltaT.x = -BOX_SIZE / ray.direction.x; t_x = (floor(ray.source.x / BOX_SIZE) * BOX_SIZE - ray.source.x) / ray.direction.x; } else { deltaT.x = BOX_SIZE / ray.direction.x; t_x = ((floor(ray.source.x / BOX_SIZE) + 1) * BOX_SIZE - ray.source.x) / ray.direction.x; } if (ray.direction.y < 0) { deltaT.y = -BOX_SIZE / ray.direction.y; t_y = (floor(ray.source.y / BOX_SIZE) * BOX_SIZE - ray.source.y) / ray.direction.y; } else { deltaT.y = BOX_SIZE / ray.direction.y; t_y = ((floor(ray.source.y / BOX_SIZE) + 1) * BOX_SIZE - ray.source.y) / ray.direction.y; } if (ray.direction.z < 0) { deltaT.z = -BOX_SIZE / ray.direction.z; t_z = (floor(ray.source.z / BOX_SIZE) * BOX_SIZE - ray.source.z) / ray.direction.z; } else { deltaT.z = BOX_SIZE / ray.direction.z; t_z = ((floor(ray.source.z / BOX_SIZE) + 1) * BOX_SIZE - ray.source.z) / ray.direction.z; } while (true) { if (currentBox.get_x() < 0 || currentBox.get_y() < 0 || currentBox.get_z() < 0 || currentBox.get_x() >= MAP_SIZE || currentBox.get_y() >= MAP_SIZE || currentBox.get_z() >= MAP_SIZE /*|| deep > MAP_SIZE * 2*/) { *lastBox = Box(-1, -1, -1); return make_double3(-1., -1., -1.); } double t = 0.; if (t_x < t_y) { if (t_x < t_z) { t = t_x; t_x += deltaT.x; // increment, next crossing along x if(ray.direction.x < 0) currentBox.dec_x(); else currentBox.inc_x(); } else { t = t_z; t_z += deltaT.z; // increment, next crossing along x if(ray.direction.z < 0) currentBox.dec_z(); else currentBox.inc_z(); } } else { if (t_y < t_z) { t = t_y; t_y += deltaT.y; // increment, next crossing along x if(ray.direction.y < 0) currentBox.dec_y(); else 
currentBox.inc_y(); } else { t = t_z; t_z += deltaT.z; // increment, next crossing along x if(ray.direction.z < 0) currentBox.dec_z(); else currentBox.inc_z(); } } if (checkWorld(&currentBox, world)) { *lastBox = currentBox; return ray.source + ray.direction * t; } deep++; } } /* копипаста траверс_рея для удаления блоков*/ __host__ __device__ bool hitRay(int startX, int startY, int startZ, Ray &ray, int deep, Box &boxToDelete, Box &boxToAdd, voxel* world) { const double eps = 0.000001; Box currentBox = Box(startX, startY, startZ); while (true) { if (currentBox.get_x() < 0 || currentBox.get_y() < 0 || currentBox.get_z() < 0 || currentBox.get_x() >= MAP_SIZE || currentBox.get_y() >= MAP_SIZE || currentBox.get_z() >= MAP_SIZE || deep > 150) return false; /* A1 < A2 : точки пересечения луча и бокса */ double3 A1 = double3(), A2 = double3(); if (currentBox.intersect(ray, 0, INFINITY, A1, A2)) { boxToAdd = currentBox; double3 A2_normalized = A2 - currentBox.bounds[0]; if (abs(A2_normalized.x) < eps) currentBox.dec_x(); if (abs(A2_normalized.y) < eps) currentBox.dec_y(); if (abs(A2_normalized.z) < eps) currentBox.dec_z(); if (abs(A2_normalized.x - BOX_SIZE) < eps) currentBox.inc_x(); if (abs(A2_normalized.y - BOX_SIZE) < eps) currentBox.inc_y(); if (abs(A2_normalized.z - BOX_SIZE) < eps) currentBox.inc_z(); if (checkWorld(&currentBox, world)) { boxToDelete = currentBox; return true; } } deep++; } } // TODO нужно ли делать это в хосте? __host__ __device__ void deleteVoxel(Camera *cam, voxel* world) { Ray hit = computePrimRay(cam, imageWidth / 2, imageHeight / 2); Box boxToDelete = Box(0, 0, 0), boxToAdd = Box(0, 0, 0); if (hitRay(static_cast<int>(cam->eyePosition.x / BOX_SIZE), static_cast<int>(cam->eyePosition.y / BOX_SIZE), static_cast<int>(cam->eyePosition.z / BOX_SIZE), hit, 5, boxToDelete, boxToAdd, world)) { int dx[] = { 1, 0, -1, 0, 0, 0}; int dy[] = { 0, 1, 0, -1, 0, 0 }; int dz[] = { 0, 0, 0, 0, 1, -1 }; for (int i = 0; i < 6; i++) { world[(boxToDelete.get_x() + dx[i]) * MAP_SIZE * MAP_SIZE + (boxToDelete.get_y() + dy[i]) * MAP_SIZE + boxToDelete.get_z() + dz[i]].setInactive(); } world[boxToDelete.get_x() * MAP_SIZE * MAP_SIZE + boxToDelete.get_y() * MAP_SIZE + boxToDelete.get_z()].setInactive(); } } __host__ __device__ void addVoxel(Camera *cam, voxel* world) { Ray hit = computePrimRay(cam, imageWidth / 2, imageHeight / 2); Box boxToAdd = Box(0, 0, 0), boxToDelete = Box(0, 0, 0); if (hitRay(static_cast<int>(cam->eyePosition.x / BOX_SIZE), static_cast<int>(cam->eyePosition.y / BOX_SIZE), static_cast<int>(cam->eyePosition.z / BOX_SIZE), hit, 5, boxToDelete, boxToAdd, world)) { world[boxToAdd.get_x() * MAP_SIZE * MAP_SIZE + boxToAdd.get_y() * MAP_SIZE + boxToAdd.get_z()].setActive(); } } __global__ void traversePixels(uint3 *screen, Camera *cam, voxel* world, double3 *lightSource) { __shared__ uint3 temp[512]; __shared__ double3 firstHitDots[512]; __shared__ Camera sharedCam; __shared__ double3 firstHitDotsNormalized[512]; sharedCam = *cam; double eps = 0.0000001; // int idx = blockIdx.x * blockDim.x + threadIdx.x; Box currBox = Box(); uchar3 color; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int idx = j + i * blockDim.x * gridDim.x; int linearThreadIdxInBlock = threadIdx.x + threadIdx.y * 16; Ray primRay = computePrimRay(cam, i, j); firstHitDots[linearThreadIdxInBlock] = traverseRay((static_cast<int>(sharedCam.eyePosition.x / BOX_SIZE)), (static_cast<int>(sharedCam.eyePosition.y / BOX_SIZE)), (static_cast<int>(sharedCam.eyePosition.z / 
BOX_SIZE)), primRay, 0, world, &currBox); double3 emptyConst = make_double3(-1., -1., -1.); if (firstHitDots[linearThreadIdxInBlock] == emptyConst) { /** мы не коснулись ничего = небо */ color = make_uchar3(21, 4, 133); } else if (checkWorld(&currBox, world) == 1) { __syncthreads(); int3 coordinatesOfVoxel = make_int3(currBox.get_x(), currBox.get_y(), currBox.get_z()); color = world[coordinatesOfVoxel.x * MAP_SIZE * MAP_SIZE + coordinatesOfVoxel.y * MAP_SIZE + coordinatesOfVoxel.z].color; double3 dir = firstHitDots[linearThreadIdxInBlock] - *lightSource; Ray shadowRay; shadowRay.source = *lightSource; shadowRay.direction = dir; double3 lastLightHit = traverseRay((static_cast<int>(lightSource->x / BOX_SIZE)), (static_cast<int>(lightSource->y / BOX_SIZE)), (static_cast<int>(lightSource->z / BOX_SIZE)), shadowRay, 0, world, &currBox); cudaDeviceSynchronize(); /*if (firstHitDots[linearThreadIdxInBlock].y / BOX_SIZE == MAP_SIZE - 10) color = make_uint3(198, 42, 136);*/ //cudaDeviceSynchronize(); if (!(lastLightHit == firstHitDots[linearThreadIdxInBlock])) { /** случай когда точка падения полностью в тени */ color = color * 0.2; } else { /** случай когда свет дошел до точки */ /** найти на какой грани лежит точка firstHitDot */ firstHitDotsNormalized[linearThreadIdxInBlock] = firstHitDots[linearThreadIdxInBlock] - make_double3(round(firstHitDots[linearThreadIdxInBlock].x / BOX_SIZE.) * BOX_SIZE, round(firstHitDots[linearThreadIdxInBlock].y / BOX_SIZE.) * BOX_SIZE, round(firstHitDots[linearThreadIdxInBlock].z / BOX_SIZE.) * BOX_SIZE); double3 normal = make_double3(0., 0., 0.); //cudaDeviceSynchronize(); if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].x) < eps) normal.x = -1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].x - BOX_SIZE) < eps) normal.x = +1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].y) < eps) normal.y = -1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].y - BOX_SIZE) < eps) normal.y = +1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].z) < eps) normal.z = -1.; if (abs(firstHitDotsNormalized[linearThreadIdxInBlock].z - BOX_SIZE) < eps) normal.z = +1.; double lightIntensity = 0.2; double cosx = Dot(normal, shadowRay.direction * -1.) / Magnitude(normal) / Magnitude(shadowRay.direction); double diffuser = (Magnitude(firstHitDots[linearThreadIdxInBlock] - *lightSource)); //cosx = 130000 * cosx / (diffuser * diffuser); if (cosx >= eps) lightIntensity += cosx; if (lightIntensity > 1.) lightIntensity = 1.0; color = color * lightIntensity; //cudaDeviceSynchronize(); } } else { /** куб Валера */ color = make_uchar3(255, 255, 255); } temp[linearThreadIdxInBlock].x = (static_cast<unsigned char>(color.x)); temp[linearThreadIdxInBlock].y = (static_cast<unsigned char>(color.y)); temp[linearThreadIdxInBlock].z = (static_cast<unsigned char>(color.z)); __syncthreads(); screen[idx].x = temp[linearThreadIdxInBlock].x; screen[idx].y = temp[linearThreadIdxInBlock].y; screen[idx].z = temp[linearThreadIdxInBlock].z; } /* std::chrono::milliseconds start_time; __host__ __device__ void generateMap(unsigned int *world) { static double t1 = 0.001, t2 = 0.001; std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::system_clock::now().time_since_epoch() ) - start_time; double delta = M_PI; int x1 = static_cast<int>(10*cos(t1)) + MAP_SIZE / 2; int z1 = static_cast<int>(10*sin(t1)) + MAP_SIZE / 2; int y1 = static_cast<int>(t1*3.) 
+ BOX_SIZE * 2; int x2 = static_cast<int>(10*cos(-t2-delta)) + MAP_SIZE / 2; int z2 = static_cast<int>(10*sin(-t2-delta)) + MAP_SIZE / 2; int y2 = static_cast<int>(t2*5.) + BOX_SIZE * 2; if((x1 >= MAP_SIZE || y1 >= MAP_SIZE || z1 >= MAP_SIZE) || (x2 >= MAP_SIZE || y2 >= MAP_SIZE || z2 >= MAP_SIZE)) return; int idx = x1 * MAP_SIZE * MAP_SIZE + y1 * MAP_SIZE + z1; world[idx] = 1; idx = x2 * MAP_SIZE * MAP_SIZE + y2 * MAP_SIZE + z2; if(t1 > delta) { world[idx] = 1; t2 += 0.05; } t1 += 0.05; //for (int i = 0; i < MAP_SIZE * MAP_SIZE * MAP_SIZE; i++) // world[i] = (rand() % 1000 == 0); } */ bool bounds(double3 pos) { if (pos.x >= (MAP_SIZE - 1) * BOX_SIZE || pos.y >= (MAP_SIZE - 1) * BOX_SIZE || pos.z >= ((MAP_SIZE - 1) * BOX_SIZE) || pos.x <= 0 || pos.y <= 0 || pos.z <= 0) return false; return true; } void printDebug(Camera *cam) { printf("angleX: %lf\n", cam->angleX); printf("angleY: %lf\n", cam->angleY); printf("eyePosition: (%lf, %lf, %lf)\n", cam->eyePosition.x, cam->eyePosition.y, cam->eyePosition.z); } int main() { /* Tga info = Tga("C:/Users/...../Desktop/SomeTGA.tga"); GLuint texture = 0; glGenTextures(1, &texture); glBindTexture(GL_TEXTURE_2D, texture); glTexImage2D(GL_TEXTURE_2D, 0, info.HasAlphaChannel() ? GL_RGBA : GL_RGB, info.GetWidth(), info.GetWidth(), 0, info.HasAlphaChannel() ? GL_RGBA : GL_RGB, GL_UNSIGNED_BYTE, info.GetPixels().data()); */ int frames = 0; float sumScreenRenderTime = 0.0; voxel *world; Camera *cam; uint3 *screen; double3* light; if (cudaMallocManaged(&world, MAP_SIZE * MAP_SIZE * MAP_SIZE * sizeof(world[0]))) fprintf(stderr, "cuda malloc error: world"); if (cudaMallocManaged(&screen, imageHeight * imageWidth * sizeof(uint3))) fprintf(stderr, "cuda malloc error: screen"); if (cudaMallocManaged(&cam, sizeof(Camera))) fprintf(stderr, "cuda malloc error: camera"); if (cudaMallocManaged(&light, sizeof(double3))) fprintf(stderr, "cuda malloc error: light"); uint3 localLight = make_uint3(MAP_SIZE / 2, 15, MAP_SIZE / 2); auto *hostScreen = static_cast<uint3 *>(malloc(imageHeight * imageWidth * sizeof(uint3))); int blocksCnt = 0; for (int i = 0; i < MAP_SIZE * MAP_SIZE * MAP_SIZE; i++) { int x, y, z; x = i / MAP_SIZE / MAP_SIZE; y = i / MAP_SIZE % MAP_SIZE; z = i % MAP_SIZE; int R = 35; if ((x - MAP_SIZE / 2) * (x - MAP_SIZE / 2) + (y - (MAP_SIZE - 2 * R)) * (y - (MAP_SIZE - 2 * R)) + (z - MAP_SIZE / 2) * (z - MAP_SIZE / 2) <= R * R) { world[i].setActive(); world[i].setColor(rand()%256, rand()%256, rand()%256); } if (y == MAP_SIZE - 10) { world[i].setActive(); world[i].setColor(0, 255, 0); } blocksCnt += world[i].isActive(); } cudaDeviceSynchronize(); printf("Num of voxels: %d\n", blocksCnt); double3 eyePosition = make_double3(64.636510, 1.0, 294.136342); cam->eyePosition = eyePosition; cam->angleX = 234.833333; cam->angleY = -28.666667; cam->speed = 5.0; sf::Color backgroundColor = sf::Color::Black; sf::RenderWindow window(sf::VideoMode(imageHeight, imageWidth), "lol"); sf::Image image; image.create(imageHeight, imageWidth, sf::Color::Magenta); bool drawCross = true; sf::Texture crossTexture; if (!crossTexture.loadFromFile("cross.png", sf::IntRect(0, 0, crossSize, crossSize))) { fprintf(stderr, "Error loading cross.jpg\n"); } // TODO потом перепишем double3 *moveStraight; cudaMallocManaged(&moveStraight, sizeof(double3)); double3 *moveNotStraight; cudaMallocManaged(&moveNotStraight, sizeof(double3)); double t = 0.0; localLight.x = static_cast<int>(10 * cos(t)) + MAP_SIZE / 2; localLight.y = static_cast<int>(10 * sin(t)) + MAP_SIZE / 2; localLight.z = 10; while 
(window.isOpen()) { world[localLight.x * MAP_SIZE * MAP_SIZE + localLight.y * MAP_SIZE + localLight.z].setInactive(); localLight.x = static_cast<int>(40 * cos(t)) + MAP_SIZE / 2; localLight.y = static_cast<int>(40 * sin(t)) + MAP_SIZE / 2; localLight.z = 10; t += 0.05; light->x = localLight.x * BOX_SIZE + BOX_SIZE / 2.; light->y = localLight.y * BOX_SIZE + BOX_SIZE / 2.; light->z = localLight.z * BOX_SIZE + BOX_SIZE / 2.; world[localLight.x * MAP_SIZE * MAP_SIZE + localLight.y * MAP_SIZE + localLight.z].setLight(); SetCursorPos(window.getPosition().x + imageWidth / 2, window.getPosition().y + imageHeight / 2); window.sf::Window::setMouseCursorVisible(false); dim3 threads(16,16); dim3 blocks(imageWidth/threads.x,imageHeight/threads.y); traversePixels<<<blocks, threads>>>(screen, cam, world, light); //cudaDeviceSynchronize(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMemcpy(hostScreen, screen, imageHeight * imageWidth * sizeof(uint3), cudaMemcpyDeviceToHost); for (int i = 0; i < imageHeight; i++) { for (int j = 0; j < imageWidth; j++) { image.setPixel(j, i, sf::Color(hostScreen[i * imageWidth + j].x, hostScreen[i * imageWidth + j].y, hostScreen[i * imageWidth + j].z)); } } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); sumScreenRenderTime += milliseconds; sf::Event event{}; while (window.pollEvent(event)) { if (event.type == sf::Event::Closed) window.close(); if (event.type == sf::Event::KeyPressed) { if (event.key.code == sf::Keyboard::Escape) window.close(); if (event.key.code == sf::Keyboard::V) drawCross = !drawCross; if (event.key.code == sf::Keyboard::I) printDebug(cam); if (event.key.code == sf::Keyboard::Up) { cam->speed++; } if (event.key.code == sf::Keyboard::Down) { cam->speed--; } } /*if (event.type == sf::Event::MouseButtonPressed){ if (event.mouseButton.button == sf::Mouse::Left) deleteVoxel(cam, world); if (event.mouseButton.button == sf::Mouse::Right) addVoxel(cam, world); }*/ } sf::Sprite crossSprite; crossSprite.setTexture(crossTexture); crossSprite.setPosition(imageWidth * 4 / 2. - crossSize / 2., imageHeight * 4 / 2. - crossSize / 2.); sf::Texture pixelsTexture; pixelsTexture.loadFromImage(image); sf::Sprite pixels; pixels.setTexture(pixelsTexture, true); if (sf::Mouse::isButtonPressed(sf::Mouse::Left)) { deleteVoxel(cam, world); } if (sf::Mouse::isButtonPressed(sf::Mouse::Right)) { addVoxel(cam, world); } POINT mousexy; GetCursorPos(&mousexy); int xt = window.getPosition().x + imageWidth / 2; int yt = window.getPosition().y + imageHeight / 2; cam->angleX += (xt - mousexy.x) / 6.; cam->angleY += (yt - mousexy.y) / 6.; SetCursorPos(xt, yt); if (cam->angleY > 89.) cam->angleY = 89.; if (cam->angleY < -89.) 
cam->angleY = -89.; buildDirectionOfStraightMove(cam, moveStraight); buildDirectionOfNotSoStraightMove(cam, moveNotStraight); if (sf::Keyboard::isKeyPressed(sf::Keyboard::W)) if (bounds(cam->eyePosition + Normalize(moveStraight) * cam->speed)) cam->eyePosition = cam->eyePosition + Normalize(moveStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::S)) if (bounds(cam->eyePosition - Normalize(moveStraight) * cam->speed)) cam->eyePosition = cam->eyePosition - Normalize(moveStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::A)) if (bounds(cam->eyePosition + Normalize(moveNotStraight) * cam->speed)) cam->eyePosition = cam->eyePosition + Normalize(moveNotStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::D)) if (bounds(cam->eyePosition - Normalize(moveNotStraight) * cam->speed)) cam->eyePosition = cam->eyePosition - Normalize(moveNotStraight) * cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::LShift)) if (bounds(cam->eyePosition + make_double3(0, cam->speed, 0))) cam->eyePosition.y += cam->speed; if (sf::Keyboard::isKeyPressed(sf::Keyboard::Space)) if (bounds(cam->eyePosition - make_double3(0, cam->speed, 0))) cam->eyePosition.y -= cam->speed; window.clear(sf::Color::Magenta); window.draw(pixels); if (drawCross) window.draw(crossSprite); window.display(); } cudaFree(world); cudaFree(screen); cudaFree(cam); std::cout << "frames: " << frames-1 << std::endl; std::cout << "sumScreenRenderTime: " << sumScreenRenderTime << std::endl; return 0; }
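The shading branch of traversePixels() above combines a constant ambient term of 0.2 with the Lambert cosine between the face normal and the direction toward the light, clamped to [0, 1]. A compact restatement as a standalone helper (an illustrative sketch, not part of the file; Dot and Magnitude are the geometry.cuh routines the kernel already uses):

__host__ __device__ inline double shadeIntensity(double3 normal, double3 toLight) {
    // cosine of the angle between the surface normal and the direction to the light source
    double cosx = Dot(normal, toLight) / (Magnitude(normal) * Magnitude(toLight));
    double intensity = 0.2 + (cosx > 0. ? cosx : 0.);   // back-facing faces keep only the ambient term
    return intensity > 1. ? 1.0 : intensity;
}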
c99f06efa54833116602c7b08152441d76021e37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from zgeellrtmv.cu normal z -> s, Sun May 3 11:22:58 2015 */ #include "common_magma.h" //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void sgeellrtmv_kernel_32( int num_rows, int num_cols, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowlength, float * dx, float beta, float * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void sgeellrtmv_kernel_16( int num_rows, int num_cols, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowlength, float * dx, float beta, float * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M.
Garzón, Almeria University __global__ void sgeellrtmv_kernel_8( int num_rows, int num_cols, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowlength, float * dx, float beta, float * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows @param[in] n magma_int_t number of columns @param[in] nnz_per_row magma_int_t max number of nonzeros in a row @param[in] alpha float scalar alpha @param[in] dval magmaFloat_ptr val array @param[in] dcolind magmaIndex_ptr col indices @param[in] drowlength magmaIndex_ptr number of elements in each row @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar beta @param[out] dy magmaFloat_ptr output vector y @param[in] blocksize magma_int_t threads per block @param[in] alignment magma_int_t threads assigned to each row @param[in] queue magma_queue_t Queue to execute in.
@ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_int_t alignment, magma_int_t blocksize, magma_queue_t queue ) { int num_blocks = magma_ceildiv( m, blocksize ); magma_int_t num_threads = alignment*blocksize; magma_int_t threads = alignment*blocksize; int real_row_length = magma_roundup( nnz_per_row, alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = (int) sqrt( (float) num_blocks ); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( float ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( alignment == 32 ) { hipLaunchKernelGGL(( sgeellrtmv_kernel_32), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 16 ) { hipLaunchKernelGGL(( sgeellrtmv_kernel_16), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 8 ) { hipLaunchKernelGGL(( sgeellrtmv_kernel_8), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } return MAGMA_SUCCESS; }
c99f06efa54833116602c7b08152441d76021e37.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from zgeellrtmv.cu normal z -> s, Sun May 3 11:22:58 2015 */ #include "common_magma.h" //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void sgeellrtmv_kernel_32( int num_rows, int num_cols, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowlength, float * dx, float beta, float * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void sgeellrtmv_kernel_16( int num_rows, int num_cols, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowlength, float * dx, float beta, float * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. 
Garzón, Almeria University __global__ void sgeellrtmv_kernel_8( int num_rows, int num_cols, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowlength, float * dx, float beta, float * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ float shared[]; if(i < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //float val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) float val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows @param[in] n magma_int_t number of columns @param[in] nnz_per_row magma_int_t max number of nonzeros in a row @param[in] alpha float scalar alpha @param[in] dval magmaFloat_ptr val array @param[in] dcolind magmaIndex_ptr col indices @param[in] drowlength magmaIndex_ptr number of elements in each row @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar beta @param[out] dy magmaFloat_ptr output vector y @param[in] blocksize magma_int_t threads per block @param[in] alignment magma_int_t threads assigned to each row @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_int_t alignment, magma_int_t blocksize, magma_queue_t queue ) { int num_blocks = magma_ceildiv( m, blocksize ); magma_int_t num_threads = alignment*blocksize; magma_int_t threads = alignment*blocksize; int real_row_length = magma_roundup( nnz_per_row, alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = (int) sqrt( (float) num_blocks ); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( float ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( alignment == 32 ) { sgeellrtmv_kernel_32<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 16 ) { sgeellrtmv_kernel_16<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 8 ) { sgeellrtmv_kernel_8<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } return MAGMA_SUCCESS; }
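In the wrapper above, each matrix row i owns a padded slot of real_row_length entries starting at offset i * real_row_length in dval/dcolind, and `alignment` threads cooperate on the drowlength[i] values actually stored there. A plain CPU reference of the same ELLRT product (an illustrative sketch, not part of MAGMA; it assumes the padding inside each row slot is zero-filled and uses plain int indices in place of magma_index_t):

void sgeellrtmv_cpu_reference(int num_rows, float alpha,
                              const float *val, const int *colind,
                              const int *rowlength, const float *x,
                              float beta, float *y, int row_stride)
{
    // row_stride plays the role of real_row_length: nnz_per_row rounded up to `alignment`.
    for (int i = 0; i < num_rows; ++i) {
        float dot = 0.0f;
        for (int k = 0; k < rowlength[i]; ++k) {
            // same addressing as the GPU kernels: the k-th stored element of row i
            dot += val[i * row_stride + k] * x[colind[i * row_stride + k]];
        }
        y[i] = alpha * dot + beta * y[i];
    }
}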
c80c52ab2eea12d98b3515f55daecee9b67ddf04.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************//** * @author Xavier Loose * ******************************************************************************/ /***************************************************************************//** * @author Xavier * * @par Description: VectorAdd is the first example and I feel that implementing * it is still useful. The order will be a function implemented by an auto version. * The goal of * ******************************************************************************/ __global__ void vectorAdd(float* d_A, float* d_B, float* d_C, int size) { // Vectors are one dimensional so we shouldn't need to handle matrix orientations int loc = blockDim.x * blockIdx.x + threadIdx.x; unsigned int threadCount = blockDim.x * gridDim.x; // total threads in the grid (replaces the undefined getThreadCount) unsigned int computes = (size + threadCount - 1) / threadCount; // elements each thread is responsible for for ( unsigned int i = 0; i < computes; i++ ) { unsigned int idx = loc + i * threadCount; // grid-stride indexing keeps neighbouring threads on neighbouring elements if (idx < (unsigned int)size) d_C[idx] = d_A[idx] + d_B[idx]; } } /***************************************************************************//** * * * ******************************************************************************/ __global__ void vectorAddAuto(float* d_A, float* d_B, float* d_C, int size) { } __global__ void matrixMul() { } __global__ void matrixMulAdd() { }
c80c52ab2eea12d98b3515f55daecee9b67ddf04.cu
/***************************************************************************//** * @author Xavier Loose * ******************************************************************************/ /***************************************************************************//** * @author Xavier * * @par Description: VectorAdd is the first example and I feel that implementing * it is still useful. The order will be a function implemented by an auto version. * The goal of * ******************************************************************************/ __global__ void vectorAdd(float* d_A, float* d_B, float* d_C, int size) { // Vectors are one dimensional so we shouldn't need to handle matrix orientations int loc = blockDim.x * blockIdx.x + threadIdx.x; unsigned int threadCount = blockDim.x * gridDim.x; // total threads in the grid (replaces the undefined getThreadCount) unsigned int computes = (size + threadCount - 1) / threadCount; // elements each thread is responsible for for ( unsigned int i = 0; i < computes; i++ ) { unsigned int idx = loc + i * threadCount; // grid-stride indexing keeps neighbouring threads on neighbouring elements if (idx < (unsigned int)size) d_C[idx] = d_A[idx] + d_B[idx]; } } /***************************************************************************//** * * * ******************************************************************************/ __global__ void vectorAddAuto(float* d_A, float* d_B, float* d_C, int size) { } __global__ void matrixMul() { } __global__ void matrixMulAdd() { }
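A minimal host-side sketch of how the vectorAdd kernel above could be driven, assuming it is compiled in the same .cu file as the kernel (assumed usage, not part of the original file; the vector length and 256-thread blocks are illustrative choices):

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    const int N = 1 << 20;                           // illustrative vector length
    const size_t bytes = N * sizeof(float);
    float *h_A = (float*)malloc(bytes), *h_B = (float*)malloc(bytes), *h_C = (float*)malloc(bytes);
    for (int i = 0; i < N; ++i) { h_A[i] = 1.0f; h_B[i] = 2.0f; }

    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, bytes);
    cudaMalloc((void**)&d_B, bytes);
    cudaMalloc((void**)&d_C, bytes);
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);

    const int threads = 256;
    const int blocks = (N + threads - 1) / threads;  // one element per thread; the strided
                                                     // loop in vectorAdd also tolerates fewer blocks
    vectorAdd<<<blocks, threads>>>(d_A, d_B, d_C, N);
    cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);
    printf("C[0] = %f\n", h_C[0]);                   // expect 3.0

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);
    return 0;
}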
88b2c2ccb7888ab3d78531a2274dbf5e96fb04d5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vvlatbnd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int nx = 1; int ny = 1; DECNUM *uu = NULL; hipMalloc(&uu, XSIZE*YSIZE*sizeof(DECNUM)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vvlatbnd), dim3(gridBlock),dim3(threadBlock), 0, 0, nx,ny,uu); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vvlatbnd), dim3(gridBlock),dim3(threadBlock), 0, 0, nx,ny,uu); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vvlatbnd), dim3(gridBlock),dim3(threadBlock), 0, 0, nx,ny,uu); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
88b2c2ccb7888ab3d78531a2274dbf5e96fb04d5.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vvlatbnd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int nx = 1; int ny = 1; DECNUM *uu = NULL; cudaMalloc(&uu, XSIZE*YSIZE*sizeof(DECNUM)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vvlatbnd<<<gridBlock,threadBlock>>>(nx,ny,uu); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vvlatbnd<<<gridBlock,threadBlock>>>(nx,ny,uu); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vvlatbnd<<<gridBlock,threadBlock>>>(nx,ny,uu); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
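Note that the timed loop above issues 1000 asynchronous launches and reads steady_clock without waiting for the device, so the reported interval can be dominated by launch overhead rather than kernel execution. A sketch of the same timed region with a synchronization point before the second clock read, assuming wall-clock kernel time is what the harness is meant to report:

auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    vvlatbnd<<<gridBlock,threadBlock>>>(nx,ny,uu);
}
cudaDeviceSynchronize();   // wait until all 1000 launches have actually finished executing
auto end = steady_clock::now();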
3c9e885fba7dfa8cc5ba827cdf0b20cd36c24166.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <iostream> #include "caffe/common.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/util/deformable_im2col.hpp" using namespace std; namespace caffe { template <typename Dtype> __device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width, const int height, const int width, Dtype h, Dtype w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (Dtype)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (Dtype)w_low; } else { w_high = w_low + 1; } Dtype lh = h - h_low; Dtype lw = w - w_low; Dtype hh = 1 - lh, hw = 1 - lw; Dtype v1 = bottom_data[h_low * data_width + w_low]; Dtype v2 = bottom_data[h_low * data_width + w_high]; Dtype v3 = bottom_data[h_high * data_width + w_low]; Dtype v4 = bottom_data[h_high * data_width + w_high]; Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename Dtype> __device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } argmax_h = max(argmax_h, (Dtype)0.0f); argmax_w = max(argmax_w, (Dtype)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename Dtype> __device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w, const int height, const int width, const Dtype* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - 
argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename Dtype> __global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; Dtype* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const Dtype* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_in) * width + w_in;//0 const Dtype* data_offset_ptr = data_offset; data_offset_ptr += deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;//0 for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype val = static_cast<Dtype>(0); const Dtype h_im = h_in + i * dilation_h + offset_h; const Dtype w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const Dtype map_h = i * dilation_h + offset_h; const Dtype map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename Dtype> void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; int channel_per_deformable_group = channels/ deformable_group; hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col); template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* data_col); template <typename Dtype> __global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, int channel_per_deformable_group, int height_col, int width_col, Dtype* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h; const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w; const Dtype cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); caffe_gpu_atomic_add(weight * cur_top_grad, grad_im + cur_bottom_grad_pos); } } } } } template <typename Dtype> void deformable_col2im_gpu(const Dtype* data_col, const Dtype* 
data_offset, const int channels,const int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int channel_per_deformable_group = channels / deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, data_offset,channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_im); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels, const int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_im); template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels, const int height, const int width, const int num_kernels,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_im); template <typename Dtype> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_offset) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int 
data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype inv_h = h_in + i * dilation_h + offset_h; Dtype inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const Dtype weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename Dtype> void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * kernel_h * kernel_h * deformable_group; int channel_per_deformable_group = channels/ deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, data_im,data_offset, channels,height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_offset); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_im,const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_offset); template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_im,const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_offset); }
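A tiny standalone helper (illustrative, not part of the file above) showing the output-size formula that deformable_im2col_gpu, deformable_col2im_gpu and deformable_col2im_coord_gpu all use to compute height_col and width_col:

#include <cstdio>

// out = (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1
int conv_out_size(int in, int kernel, int pad, int stride, int dilation) {
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
    printf("%d\n", conv_out_size(224, 3, 1, 1, 1)); // 3x3 kernel, pad 1, stride 1 -> 224
    printf("%d\n", conv_out_size(224, 3, 1, 2, 1)); // same kernel, stride 2      -> 112
    return 0;
}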
3c9e885fba7dfa8cc5ba827cdf0b20cd36c24166.cu
#include <algorithm> #include <iostream> #include "caffe/common.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/util/deformable_im2col.hpp" using namespace std; namespace caffe { template <typename Dtype> __device__ Dtype deformable_im2col_bilinear(const Dtype* bottom_data, const int data_width, const int height, const int width, Dtype h, Dtype w) { int h_low = floor(h); int w_low = floor(w); int h_high; int w_high; if (h_low >= height - 1) { h_high = h_low = height - 1; h = (Dtype)h_low; } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = (Dtype)w_low; } else { w_high = w_low + 1; } Dtype lh = h - h_low; Dtype lw = w - w_low; Dtype hh = 1 - lh, hw = 1 - lw; Dtype v1 = bottom_data[h_low * data_width + w_low]; Dtype v2 = bottom_data[h_low * data_width + w_high]; Dtype v3 = bottom_data[h_high * data_width + w_low]; Dtype v4 = bottom_data[h_high * data_width + w_high]; Dtype w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; Dtype val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename Dtype> __device__ Dtype get_gradient_weight(Dtype argmax_h, Dtype argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } argmax_h = max(argmax_h, (Dtype)0.0f); argmax_w = max(argmax_w, (Dtype)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename Dtype> __device__ Dtype get_coordinate_weight(Dtype argmax_h, Dtype argmax_w, const int height, const int width, const Dtype* im_data, const int data_width, const int bp_dir) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { //empty return 0; } if (argmax_h < 0) argmax_h = 0; if (argmax_w < 0) argmax_w = 0; int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (Dtype)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (Dtype)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } Dtype weight = 0; if (bp_dir == 0) { weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; weight += (argmax_h_low + 1 - 
argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } /*! * \brief deformable_im2col gpu kernel. * DO NOT call this directly. Use wrapper function im2col() instead; */ template <typename Dtype> __global__ void deformable_im2col_gpu_kernel(const int n, const Dtype* data_im, const Dtype* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; Dtype* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const Dtype* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_in) * width + w_in;//0 const Dtype* data_offset_ptr = data_offset; data_offset_ptr += deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col;//0 for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype val = static_cast<Dtype>(0); const Dtype h_im = h_in + i * dilation_h + offset_h; const Dtype w_im = w_in + j * dilation_w + offset_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const Dtype map_h = i * dilation_h + offset_h; const Dtype map_w = j * dilation_w + offset_w; const int cur_height = height - h_in; const int cur_width = width - w_in; val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename Dtype> void deformable_im2col_gpu(const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; int channel_per_deformable_group = channels/ deformable_group; deformable_im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, data_offset, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } template void deformable_im2col_gpu<float>(const float* data_im, const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col); template void deformable_im2col_gpu<double>(const double* data_im, const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* data_col); template <typename Dtype> __global__ void deformable_col2im_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, int channel_per_deformable_group, int height_col, int width_col, Dtype* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col) % kernel_w; const int i = (index / width_col / height_col / kernel_w) % kernel_h; const int c = index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; const Dtype cur_inv_h_data = h_in + i * dilation_h + offset_h; const Dtype cur_inv_w_data = w_in + j * dilation_w + offset_w; const Dtype cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1 ) { int cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx; Dtype weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); caffe_gpu_atomic_add(weight * cur_top_grad, grad_im + cur_bottom_grad_pos); } } } } } template <typename Dtype> void deformable_col2im_gpu(const Dtype* data_col, const Dtype* data_offset, const int channels,const 
int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int channel_per_deformable_group = channels / deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, data_offset,channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_im); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_gpu<float>(const float* data_col, const float* data_offset, const int channels, const int height, const int width,const int num_kernels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_im); template void deformable_col2im_gpu<double>(const double* data_col, const double* data_offset, const int channels, const int height, const int width, const int num_kernels,const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_im); template <typename Dtype> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int height_col, const int width_col, Dtype* grad_offset) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = index / width_col / height_col; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const Dtype* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * width_col * height_col; const Dtype* data_im_ptr = data_im + deformable_group_index * channel_per_deformable_group / kernel_h / kernel_w * height * width; const Dtype* data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = ((col_c * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col) % kernel_w; int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + 
w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const Dtype offset_h = data_offset_ptr[data_offset_h_ptr]; const Dtype offset_w = data_offset_ptr[data_offset_w_ptr]; Dtype inv_h = h_in + i * dilation_h + offset_h; Dtype inv_w = w_in + j * dilation_w + offset_w; if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -1; } const Dtype weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } template <typename Dtype> void deformable_col2im_coord_gpu(const Dtype* data_col, const Dtype* data_im, const Dtype* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, Dtype* grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * kernel_h * kernel_h * deformable_group; int channel_per_deformable_group = channels/ deformable_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) deformable_col2im_coord_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, data_im,data_offset, channels,height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, height_col, width_col, grad_offset); CUDA_POST_KERNEL_CHECK; } template void deformable_col2im_coord_gpu<float>(const float* data_col, const float* data_im,const float* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_offset); template void deformable_col2im_coord_gpu<double>(const double* data_col, const double* data_im,const double* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double* grad_offset); }
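For testing the deformable sampling path, the clamped bilinear interpolation done by deformable_im2col_bilinear can be mirrored on the host. The sketch below is mine (the name ref_bilinear and the toy 2x2 image are illustrative), but it follows the same clamp-to-edge convention as the device helper:

#include <cmath>
#include <cstdio>

float ref_bilinear(const float* data, int data_width, int height, int width,
                   float h, float w) {
    int h_low = (int)std::floor(h);
    int w_low = (int)std::floor(w);
    int h_high = h_low + 1, w_high = w_low + 1;
    // Clamp to the last row/column, as the device helper does.
    if (h_low >= height - 1) { h_high = h_low = height - 1; h = (float)h_low; }
    if (w_low >= width - 1)  { w_high = w_low = width - 1;  w = (float)w_low; }
    float lh = h - h_low, lw = w - w_low;
    float hh = 1.0f - lh, hw = 1.0f - lw;
    float v1 = data[h_low * data_width + w_low];
    float v2 = data[h_low * data_width + w_high];
    float v3 = data[h_high * data_width + w_low];
    float v4 = data[h_high * data_width + w_high];
    return hh * hw * v1 + hh * lw * v2 + lh * hw * v3 + lh * lw * v4;
}

int main() {
    float img[4] = {0.0f, 1.0f, 2.0f, 3.0f};                // 2x2 image
    printf("%f\n", ref_bilinear(img, 2, 2, 2, 0.5f, 0.5f)); // expect 1.5
    return 0;
}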
d0d54848cccdd167de4a53e0b5c65f357e7bfc3c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "auto_arima.cuh" #include <cuml/tsa/auto_arima.h> namespace ML { int divide_by_mask_build_index(const raft::handle_t& handle, const bool* d_mask, int* d_index, int batch_size) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, allocator, stream); } template <typename DataT> inline void divide_by_mask_execute_helper(const raft::handle_t& handle, const DataT* d_in, const bool* d_mask, const int* d_index, DataT* d_out0, DataT* d_out1, int batch_size, int n_obs) { hipStream_t stream = handle.get_stream(); ML::TimeSeries::divide_by_mask_execute(d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream); } void divide_by_mask_execute(const raft::handle_t& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const int* d_in, const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } template <typename DataT> inline void divide_by_min_build_index_helper(const raft::handle_t& handle, const DataT* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_build_index( d_matrix, d_batch, d_index, h_size, batch_size, n_sub, allocator, stream); } void divide_by_min_build_index(const raft::handle_t& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } void divide_by_min_build_index(const raft::handle_t& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } template <typename DataT> inline void divide_by_min_execute_helper(const raft::handle_t& handle, const DataT* d_in, const int* d_batch, const int* d_index, DataT** hd_out, int batch_size, int n_sub, int n_obs) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_execute(d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, allocator, stream); } 
void divide_by_min_execute(const raft::handle_t& handle, const float* d_in, const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void build_division_map(const raft::handle_t& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::build_division_map(hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, allocator, stream); } template <typename DataT> inline void merge_series_helper(const raft::handle_t& handle, const DataT* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int batch_size, int n_sub, int n_obs) { hipStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::merge_series(hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, allocator, stream); } void merge_series(const raft::handle_t& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } void merge_series(const raft::handle_t& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } } // namespace ML
d0d54848cccdd167de4a53e0b5c65f357e7bfc3c.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "auto_arima.cuh" #include <cuml/tsa/auto_arima.h> namespace ML { int divide_by_mask_build_index(const raft::handle_t& handle, const bool* d_mask, int* d_index, int batch_size) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); return ML::TimeSeries::divide_by_mask_build_index(d_mask, d_index, batch_size, allocator, stream); } template <typename DataT> inline void divide_by_mask_execute_helper(const raft::handle_t& handle, const DataT* d_in, const bool* d_mask, const int* d_index, DataT* d_out0, DataT* d_out1, int batch_size, int n_obs) { cudaStream_t stream = handle.get_stream(); ML::TimeSeries::divide_by_mask_execute(d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs, stream); } void divide_by_mask_execute(const raft::handle_t& handle, const float* d_in, const bool* d_mask, const int* d_index, float* d_out0, float* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const double* d_in, const bool* d_mask, const int* d_index, double* d_out0, double* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } void divide_by_mask_execute(const raft::handle_t& handle, const int* d_in, const bool* d_mask, const int* d_index, int* d_out0, int* d_out1, int batch_size, int n_obs) { divide_by_mask_execute_helper(handle, d_in, d_mask, d_index, d_out0, d_out1, batch_size, n_obs); } template <typename DataT> inline void divide_by_min_build_index_helper(const raft::handle_t& handle, const DataT* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_build_index( d_matrix, d_batch, d_index, h_size, batch_size, n_sub, allocator, stream); } void divide_by_min_build_index(const raft::handle_t& handle, const float* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } void divide_by_min_build_index(const raft::handle_t& handle, const double* d_matrix, int* d_batch, int* d_index, int* h_size, int batch_size, int n_sub) { divide_by_min_build_index_helper(handle, d_matrix, d_batch, d_index, h_size, batch_size, n_sub); } template <typename DataT> inline void divide_by_min_execute_helper(const raft::handle_t& handle, const DataT* d_in, const int* d_batch, const int* d_index, DataT** hd_out, int batch_size, int n_sub, int n_obs) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::divide_by_min_execute(d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs, allocator, stream); } void divide_by_min_execute(const raft::handle_t& handle, 
const float* d_in, const int* d_batch, const int* d_index, float** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const double* d_in, const int* d_batch, const int* d_index, double** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void divide_by_min_execute(const raft::handle_t& handle, const int* d_in, const int* d_batch, const int* d_index, int** hd_out, int batch_size, int n_sub, int n_obs) { divide_by_min_execute_helper(handle, d_in, d_batch, d_index, hd_out, batch_size, n_sub, n_obs); } void build_division_map(const raft::handle_t& handle, const int* const* hd_id, const int* h_size, int* d_id_to_pos, int* d_id_to_model, int batch_size, int n_sub) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::build_division_map(hd_id, h_size, d_id_to_pos, d_id_to_model, batch_size, n_sub, allocator, stream); } template <typename DataT> inline void merge_series_helper(const raft::handle_t& handle, const DataT* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, DataT* d_out, int batch_size, int n_sub, int n_obs) { cudaStream_t stream = handle.get_stream(); auto allocator = handle.get_device_allocator(); ML::TimeSeries::merge_series(hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs, allocator, stream); } void merge_series(const raft::handle_t& handle, const float* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, float* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } void merge_series(const raft::handle_t& handle, const double* const* hd_in, const int* d_id_to_pos, const int* d_id_to_sub, double* d_out, int batch_size, int n_sub, int n_obs) { merge_series_helper(handle, hd_in, d_id_to_pos, d_id_to_sub, d_out, batch_size, n_sub, n_obs); } } // namespace ML
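The wrappers above forward to ML::TimeSeries helpers that split a batch of series according to a boolean mask. As a rough illustration of the underlying idea only (not the cuML implementation), an exclusive scan over the 0/1 mask gives each selected series its position inside the sub-batch:

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
    bool h_mask[8] = {true, false, true, true, false, false, true, false};
    thrust::device_vector<int> d_flags(h_mask, h_mask + 8); // 1/0 flags on device
    thrust::device_vector<int> d_index(8);

    // Exclusive scan: for every masked-in series, its index within the sub-batch.
    thrust::exclusive_scan(d_flags.begin(), d_flags.end(), d_index.begin());

    for (int i = 0; i < 8; ++i)
        printf("series %d -> sub-batch position %d\n", i, (int)d_index[i]);
    return 0;
}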
4d8a7abbf8474fd2400b0439dd49e3955d5c5376.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Calculate the center of mass momentum. */ extern "C" __global__ void calcCenterOfMassMomentum(int numAtoms, const mixed4* __restrict__ velm, float4* __restrict__ cmMomentum) { extern __shared__ volatile float3 temp[]; float3 cm = make_float3(0, 0, 0); for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) { mixed4 velocity = velm[index]; if (velocity.w != 0) { mixed mass = RECIP(velocity.w); cm.x += (float) (velocity.x*mass); cm.y += (float) (velocity.y*mass); cm.z += (float) (velocity.z*mass); } } // Sum the threads in this group. int thread = threadIdx.x; temp[thread].x = cm.x; temp[thread].y = cm.y; temp[thread].z = cm.z; __syncthreads(); if (thread < 32) { temp[thread].x += temp[thread+32].x; temp[thread].y += temp[thread+32].y; temp[thread].z += temp[thread+32].z; if (thread < 16) { temp[thread].x += temp[thread+16].x; temp[thread].y += temp[thread+16].y; temp[thread].z += temp[thread+16].z; } if (thread < 8) { temp[thread].x += temp[thread+8].x; temp[thread].y += temp[thread+8].y; temp[thread].z += temp[thread+8].z; } if (thread < 4) { temp[thread].x += temp[thread+4].x; temp[thread].y += temp[thread+4].y; temp[thread].z += temp[thread+4].z; } if (thread < 2) { temp[thread].x += temp[thread+2].x; temp[thread].y += temp[thread+2].y; temp[thread].z += temp[thread+2].z; } } if (thread == 0) { float3 sum = make_float3(temp[thread].x+temp[thread+1].x, temp[thread].y+temp[thread+1].y, temp[thread].z+temp[thread+1].z); cmMomentum[blockIdx.x] = make_float4(sum.x, sum.y, sum.z, 0.0f); } } /** * Remove center of mass motion. */ extern "C" __global__ void removeCenterOfMassMomentum(unsigned int numAtoms, mixed4* __restrict__ velm, const float4* __restrict__ cmMomentum) { // First sum all of the momenta that were calculated by individual groups. extern volatile float3 temp[]; float3 cm = make_float3(0, 0, 0); for (unsigned int index = threadIdx.x; index < gridDim.x; index += blockDim.x) { float4 momentum = cmMomentum[index]; cm.x += momentum.x; cm.y += momentum.y; cm.z += momentum.z; } int thread = threadIdx.x; temp[thread].x = cm.x; temp[thread].y = cm.y; temp[thread].z = cm.z; __syncthreads(); if (thread < 32) { temp[thread].x += temp[thread+32].x; temp[thread].y += temp[thread+32].y; temp[thread].z += temp[thread+32].z; if (thread < 16) { temp[thread].x += temp[thread+16].x; temp[thread].y += temp[thread+16].y; temp[thread].z += temp[thread+16].z; } if (thread < 8) { temp[thread].x += temp[thread+8].x; temp[thread].y += temp[thread+8].y; temp[thread].z += temp[thread+8].z; } if (thread < 4) { temp[thread].x += temp[thread+4].x; temp[thread].y += temp[thread+4].y; temp[thread].z += temp[thread+4].z; } if (thread < 2) { temp[thread].x += temp[thread+2].x; temp[thread].y += temp[thread+2].y; temp[thread].z += temp[thread+2].z; } } __syncthreads(); cm = make_float3(INVERSE_TOTAL_MASS*(temp[0].x+temp[1].x), INVERSE_TOTAL_MASS*(temp[0].y+temp[1].y), INVERSE_TOTAL_MASS*(temp[0].z+temp[1].z)); // Now remove the center of mass velocity from each atom. for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) { mixed4 velocity = velm[index]; velocity.x -= cm.x; velocity.y -= cm.y; velocity.z -= cm.z; velm[index] = velocity; } }
4d8a7abbf8474fd2400b0439dd49e3955d5c5376.cu
/** * Calculate the center of mass momentum. */ extern "C" __global__ void calcCenterOfMassMomentum(int numAtoms, const mixed4* __restrict__ velm, float4* __restrict__ cmMomentum) { extern __shared__ volatile float3 temp[]; float3 cm = make_float3(0, 0, 0); for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) { mixed4 velocity = velm[index]; if (velocity.w != 0) { mixed mass = RECIP(velocity.w); cm.x += (float) (velocity.x*mass); cm.y += (float) (velocity.y*mass); cm.z += (float) (velocity.z*mass); } } // Sum the threads in this group. int thread = threadIdx.x; temp[thread].x = cm.x; temp[thread].y = cm.y; temp[thread].z = cm.z; __syncthreads(); if (thread < 32) { temp[thread].x += temp[thread+32].x; temp[thread].y += temp[thread+32].y; temp[thread].z += temp[thread+32].z; if (thread < 16) { temp[thread].x += temp[thread+16].x; temp[thread].y += temp[thread+16].y; temp[thread].z += temp[thread+16].z; } if (thread < 8) { temp[thread].x += temp[thread+8].x; temp[thread].y += temp[thread+8].y; temp[thread].z += temp[thread+8].z; } if (thread < 4) { temp[thread].x += temp[thread+4].x; temp[thread].y += temp[thread+4].y; temp[thread].z += temp[thread+4].z; } if (thread < 2) { temp[thread].x += temp[thread+2].x; temp[thread].y += temp[thread+2].y; temp[thread].z += temp[thread+2].z; } } if (thread == 0) { float3 sum = make_float3(temp[thread].x+temp[thread+1].x, temp[thread].y+temp[thread+1].y, temp[thread].z+temp[thread+1].z); cmMomentum[blockIdx.x] = make_float4(sum.x, sum.y, sum.z, 0.0f); } } /** * Remove center of mass motion. */ extern "C" __global__ void removeCenterOfMassMomentum(unsigned int numAtoms, mixed4* __restrict__ velm, const float4* __restrict__ cmMomentum) { // First sum all of the momenta that were calculated by individual groups. extern volatile float3 temp[]; float3 cm = make_float3(0, 0, 0); for (unsigned int index = threadIdx.x; index < gridDim.x; index += blockDim.x) { float4 momentum = cmMomentum[index]; cm.x += momentum.x; cm.y += momentum.y; cm.z += momentum.z; } int thread = threadIdx.x; temp[thread].x = cm.x; temp[thread].y = cm.y; temp[thread].z = cm.z; __syncthreads(); if (thread < 32) { temp[thread].x += temp[thread+32].x; temp[thread].y += temp[thread+32].y; temp[thread].z += temp[thread+32].z; if (thread < 16) { temp[thread].x += temp[thread+16].x; temp[thread].y += temp[thread+16].y; temp[thread].z += temp[thread+16].z; } if (thread < 8) { temp[thread].x += temp[thread+8].x; temp[thread].y += temp[thread+8].y; temp[thread].z += temp[thread+8].z; } if (thread < 4) { temp[thread].x += temp[thread+4].x; temp[thread].y += temp[thread+4].y; temp[thread].z += temp[thread+4].z; } if (thread < 2) { temp[thread].x += temp[thread+2].x; temp[thread].y += temp[thread+2].y; temp[thread].z += temp[thread+2].z; } } __syncthreads(); cm = make_float3(INVERSE_TOTAL_MASS*(temp[0].x+temp[1].x), INVERSE_TOTAL_MASS*(temp[0].y+temp[1].y), INVERSE_TOTAL_MASS*(temp[0].z+temp[1].z)); // Now remove the center of mass velocity from each atom. for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < numAtoms; index += blockDim.x*gridDim.x) { mixed4 velocity = velm[index]; velocity.x -= cm.x; velocity.y -= cm.y; velocity.z -= cm.z; velm[index] = velocity; } }
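Both kernels above combine per-thread partial sums with a hand-unrolled, warp-synchronous tree over a volatile shared array, which assumes a 64-thread block. The standalone sketch below (made-up kernel and variable names, not code from the file) shows the same block-level tree reduction in the simpler, explicitly synchronized form:

#include <cstdio>

__global__ void blockSum(const float* in, float* out) {
    extern __shared__ float temp[];
    int t = threadIdx.x;
    temp[t] = in[blockIdx.x * blockDim.x + t];
    __syncthreads();
    // Halve the active range each step until temp[0] holds the block sum.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (t < stride) temp[t] += temp[t + stride];
        __syncthreads();
    }
    if (t == 0) out[blockIdx.x] = temp[0];
}

int main() {
    const int n = 64;
    float h_in[n], h_out;
    for (int i = 0; i < n; ++i) h_in[i] = 1.0f;   // expected block sum: 64
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    blockSum<<<1, n, n * sizeof(float)>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("block sum = %f\n", h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}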
7b766159274b3bf224924aebc7e8b86f7dce2367.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef __GPU_TIMER_H__
#define __GPU_TIMER_H__

namespace gpu_m4ri {

struct GpuTimer {
    hipEvent_t start;
    hipEvent_t stop;

    GpuTimer() {
        hipEventCreate(&start);
        hipEventCreate(&stop);
    }

    ~GpuTimer() {
        hipEventDestroy(start);
        hipEventDestroy(stop);
    }

    void Start() { hipEventRecord(start, 0); }

    void Stop() { hipEventRecord(stop, 0); }

    float ElapsedMs() {
        float elapsed;
        hipEventSynchronize(stop);
        hipEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};

}

#endif /* __GPU_TIMER_H__ */
7b766159274b3bf224924aebc7e8b86f7dce2367.cu
#ifndef __GPU_TIMER_H__
#define __GPU_TIMER_H__

namespace gpu_m4ri {

struct GpuTimer {
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer() {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer() {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    void Start() { cudaEventRecord(start, 0); }

    void Stop() { cudaEventRecord(stop, 0); }

    float ElapsedMs() {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};

}

#endif /* __GPU_TIMER_H__ */
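A hypothetical driver for the timer above (the dummy kernel, the sizes, and the assumption that gpu_m4ri::GpuTimer is in scope are mine, not part of the original project):

#include <cstdio>

// Assumes the gpu_m4ri::GpuTimer struct defined above is visible in this
// translation unit (e.g. via its header).

__global__ void touch(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    gpu_m4ri::GpuTimer timer;
    timer.Start();
    touch<<<(n + 255) / 256, 256>>>(d_x, n);
    timer.Stop();                              // ElapsedMs() synchronizes on the stop event
    printf("kernel took %.3f ms\n", timer.ElapsedMs());

    cudaFree(d_x);
    return 0;
}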
94d86ed5b1aa5da3a56265caa90081ca2816ddb4.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { const char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward"; void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return scaled_modified_bessel_k0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda); } // namespace at::native
94d86ed5b1aa5da3a56265caa90081ca2816ddb4.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { const char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward"; void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return scaled_modified_bessel_k0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda); } // namespace at::native
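gpu_kernel() and jitted_gpu_kernel() above are ATen's machinery for applying a scalar functor elementwise over a TensorIterator. The standalone sketch below only illustrates that general elementwise pattern with a grid-stride loop; it is not ATen's implementation, and the placeholder functor stands in for scaled_modified_bessel_k0_forward:

#include <cstdio>

template <typename F>
__global__ void elementwise(const float* in, float* out, int n, F f) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        out[i] = f(in[i]);
}

struct PlaceholderOp {                         // stand-in for the Bessel functor
    __device__ float operator()(float x) const { return expf(-x); }
};

int main() {
    const int n = 8;
    float h_in[n], h_out[n];
    for (int i = 0; i < n; ++i) h_in[i] = (float)i;
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    elementwise<<<1, 32>>>(d_in, d_out, n, PlaceholderOp{});
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f -> %f\n", h_in[i], h_out[i]);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}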
db11f55cf17dc78565fdc8f1daf64d21814d5204.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iomanip> #include <iostream> #include <vector> #include <hip/hip_complex.h> #include <hip/hip_runtime.h> #include <cufinufft/common.h> #include <cufinufft/defs.h> #include <cufinufft/precision_independent.h> #include <cufinufft/spreadinterp.h> #include <cufinufft/utils.h> #include <legendre_rule_fast.h> namespace cufinufft { namespace common { using namespace cufinufft::spreadinterp; using std::max; /* Kernel for computing approximations of exact Fourier series coeffs of cnufftspread's real symmetric kernel. */ // a , f are intermediate results from function onedim_fseries_kernel_precomp() // (see cufinufft/contrib/common.cpp for description) template <typename T> __global__ void fseries_kernel_compute(int nf1, int nf2, int nf3, T *f, hipDoubleComplex *a, T *fwkerhalf1, T *fwkerhalf2, T *fwkerhalf3, int ns) { T J2 = ns / 2.0; int q = (int)(2 + 3.0 * J2); int nf; hipDoubleComplex *at = a + threadIdx.y * MAX_NQUAD; T *ft = f + threadIdx.y * MAX_NQUAD; T *oarr; if (threadIdx.y == 0) { oarr = fwkerhalf1; nf = nf1; } else if (threadIdx.y == 1) { oarr = fwkerhalf2; nf = nf2; } else { oarr = fwkerhalf3; nf = nf3; } for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < nf / 2 + 1; i += blockDim.x * gridDim.x) { int brk = 0.5 + i; T x = 0.0; for (int n = 0; n < q; n++) { x += ft[n] * 2 * (pow(cabs(at[n]), brk) * cos(brk * carg(at[n]))); } oarr[i] = x; } } template <typename T> int cufserieskernelcompute(int dim, int nf1, int nf2, int nf3, T *d_f, hipDoubleComplex *d_a, T *d_fwkerhalf1, T *d_fwkerhalf2, T *d_fwkerhalf3, int ns) /* wrapper for approximation of Fourier series of real symmetric spreading kernel. Melody Shih 2/20/22 */ { int nout = max(max(nf1 / 2 + 1, nf2 / 2 + 1), nf3 / 2 + 1); dim3 threadsPerBlock(16, dim); dim3 numBlocks((nout + 16 - 1) / 16, 1); hipLaunchKernelGGL(( fseries_kernel_compute), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, nf1, nf2, nf3, d_f, d_a, d_fwkerhalf1, d_fwkerhalf2, d_fwkerhalf3, ns); return 0; } template <typename T> int setup_spreader_for_nufft(finufft_spread_opts &spopts, T eps, cufinufft_opts opts) // Set up the spreader parameters given eps, and pass across various nufft // options. Report status of setup_spreader. Barnett 10/30/17 { int ier = setup_spreader(spopts, eps, (T)opts.upsampfac, opts.gpu_kerevalmeth); spopts.pirange = 1; // could allow user control? return ier; } void set_nf_type12(CUFINUFFT_BIGINT ms, cufinufft_opts opts, finufft_spread_opts spopts, CUFINUFFT_BIGINT *nf, CUFINUFFT_BIGINT bs) // type 1 & 2 recipe for how to set 1d size of upsampled array, nf, given opts // and requested number of Fourier modes ms. { *nf = (CUFINUFFT_BIGINT)(opts.upsampfac * ms); if (*nf < 2 * spopts.nspread) *nf = 2 * spopts.nspread; // otherwise spread fails if (*nf < MAX_NF) { // otherwise will fail anyway if (opts.gpu_method == 4) // expensive at huge nf *nf = utils::next235beven(*nf, bs); else *nf = utils::next235beven(*nf, 1); } } template <typename T> void onedim_fseries_kernel(CUFINUFFT_BIGINT nf, T *fwkerhalf, finufft_spread_opts opts) /* Approximates exact Fourier series coeffs of cnufftspread's real symmetric kernel, directly via q-node quadrature on Euler-Fourier formula, exploiting narrowness of kernel. Uses phase winding for cheap eval on the regular freq grid. Note that this is also the Fourier transform of the non-periodized kernel. The FT definition is f(k) = int e^{-ikx} f(x) dx. 
The output has an overall prefactor of 1/h, which is needed anyway for the correction, and arises because the quadrature weights are scaled for grid units not x units. Inputs: nf - size of 1d uniform spread grid, must be even. opts - spreading opts object, needed to eval kernel (must be already set up) Outputs: fwkerhalf - real Fourier series coeffs from indices 0 to nf/2 inclusive, divided by h = 2pi/n. (should be allocated for at least nf/2+1 Ts) Compare onedim_dct_kernel which has same interface, but computes DFT of sampled kernel, not quite the same object. Barnett 2/7/17. openmp (since slow vs fftw in 1D large-N case) 3/3/18 Melody 2/20/22 separate into precomp & comp functions defined below. */ { T f[MAX_NQUAD]; std::complex<double> a[MAX_NQUAD]; onedim_fseries_kernel_precomp(nf, f, a, opts); onedim_fseries_kernel_compute(nf, f, a, fwkerhalf, opts); } /* Precomputation of approximations of exact Fourier series coeffs of cnufftspread's real symmetric kernel. Inputs: nf - size of 1d uniform spread grid, must be even. opts - spreading opts object, needed to eval kernel (must be already set up) Outputs: a - phase winding rates f - funciton values at quadrature nodes multiplied with quadrature weights (a, f are provided as the inputs of onedim_fseries_kernel_compute() defined below) */ template <typename T> void onedim_fseries_kernel_precomp(CUFINUFFT_BIGINT nf, T *f, std::complex<double> *a, finufft_spread_opts opts) { T J2 = opts.nspread / 2.0; // J/2, half-width of ker z-support // # quadr nodes in z (from 0 to J/2; reflections will be added)... int q = (int)(2 + 3.0 * J2); // not sure why so large? cannot exceed MAX_NQUAD double z[2 * MAX_NQUAD]; double w[2 * MAX_NQUAD]; finufft::quadrature::legendre_compute_glr(2 * q, z, w); // only half the nodes used, eg on (0,1) for (int n = 0; n < q; ++n) { // set up nodes z_n and vals f_n z[n] *= J2; // rescale nodes f[n] = J2 * w[n] * evaluate_kernel((T)z[n], opts); // vals & quadr wei a[n] = exp((T)(2.0 * M_PI) * std::complex<T>(0.0, 1.0) * (T)(nf / 2 - z[n]) / (T)nf); // phase winding rates } } template <typename T> void onedim_fseries_kernel_compute(CUFINUFFT_BIGINT nf, T *f, std::complex<double> *a, T *fwkerhalf, finufft_spread_opts opts) { T J2 = opts.nspread / 2.0; // J/2, half-width of ker z-support int q = (int)(2 + 3.0 * J2); // not sure why so large? 
cannot exceed MAX_NQUAD CUFINUFFT_BIGINT nout = nf / 2 + 1; // how many values we're writing to int nt = ::min(nout, MY_OMP_GET_MAX_THREADS()); // how many chunks std::vector<CUFINUFFT_BIGINT> brk(nt + 1); // start indices for each thread for (int t = 0; t <= nt; ++t) // split nout mode indices btw threads brk[t] = (CUFINUFFT_BIGINT)(0.5 + nout * t / (double)nt); #pragma omp parallel { int t = MY_OMP_GET_THREAD_NUM(); if (t < nt) { // could be nt < actual # threads std::complex<double> aj[MAX_NQUAD]; // phase rotator for this thread for (int n = 0; n < q; ++n) aj[n] = pow(a[n], (T)brk[t]); // init phase factors for chunk for (CUFINUFFT_BIGINT j = brk[t]; j < brk[t + 1]; ++j) { // loop along output array T x = 0.0; // accumulator for answer at this j for (int n = 0; n < q; ++n) { x += f[n] * 2 * real(aj[n]); // include the negative freq aj[n] *= a[n]; // wind the phases } fwkerhalf[j] = x; } } } } template void onedim_fseries_kernel_compute(CUFINUFFT_BIGINT nf, float *f, std::complex<double> *a, float *fwkerhalf, finufft_spread_opts opts); template void onedim_fseries_kernel_compute(CUFINUFFT_BIGINT nf, double *f, std::complex<double> *a, double *fwkerhalf, finufft_spread_opts opts); template int setup_spreader_for_nufft(finufft_spread_opts &spopts, float eps, cufinufft_opts opts); template int setup_spreader_for_nufft(finufft_spread_opts &spopts, double eps, cufinufft_opts opts); template void onedim_fseries_kernel_precomp(CUFINUFFT_BIGINT nf, float *f, std::complex<double> *a, finufft_spread_opts opts); template void onedim_fseries_kernel_precomp(CUFINUFFT_BIGINT nf, double *f, std::complex<double> *a, finufft_spread_opts opts); template int cufserieskernelcompute(int dim, int nf1, int nf2, int nf3, float *d_f, hipDoubleComplex *d_a, float *d_fwkerhalf1, float *d_fwkerhalf2, float *d_fwkerhalf3, int ns); template int cufserieskernelcompute(int dim, int nf1, int nf2, int nf3, double *d_f, hipDoubleComplex *d_a, double *d_fwkerhalf1, double *d_fwkerhalf2, double *d_fwkerhalf3, int ns); template void onedim_fseries_kernel(CUFINUFFT_BIGINT nf, float *fwkerhalf, finufft_spread_opts opts); template void onedim_fseries_kernel(CUFINUFFT_BIGINT nf, double *fwkerhalf, finufft_spread_opts opts); } // namespace common } // namespace cufinufft
db11f55cf17dc78565fdc8f1daf64d21814d5204.cu
#include <algorithm> #include <iomanip> #include <iostream> #include <vector> #include <cuComplex.h> #include <cuda.h> #include <cufinufft/common.h> #include <cufinufft/defs.h> #include <cufinufft/precision_independent.h> #include <cufinufft/spreadinterp.h> #include <cufinufft/utils.h> #include <legendre_rule_fast.h> namespace cufinufft { namespace common { using namespace cufinufft::spreadinterp; using std::max; /* Kernel for computing approximations of exact Fourier series coeffs of cnufftspread's real symmetric kernel. */ // a , f are intermediate results from function onedim_fseries_kernel_precomp() // (see cufinufft/contrib/common.cpp for description) template <typename T> __global__ void fseries_kernel_compute(int nf1, int nf2, int nf3, T *f, cuDoubleComplex *a, T *fwkerhalf1, T *fwkerhalf2, T *fwkerhalf3, int ns) { T J2 = ns / 2.0; int q = (int)(2 + 3.0 * J2); int nf; cuDoubleComplex *at = a + threadIdx.y * MAX_NQUAD; T *ft = f + threadIdx.y * MAX_NQUAD; T *oarr; if (threadIdx.y == 0) { oarr = fwkerhalf1; nf = nf1; } else if (threadIdx.y == 1) { oarr = fwkerhalf2; nf = nf2; } else { oarr = fwkerhalf3; nf = nf3; } for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < nf / 2 + 1; i += blockDim.x * gridDim.x) { int brk = 0.5 + i; T x = 0.0; for (int n = 0; n < q; n++) { x += ft[n] * 2 * (pow(cabs(at[n]), brk) * cos(brk * carg(at[n]))); } oarr[i] = x; } } template <typename T> int cufserieskernelcompute(int dim, int nf1, int nf2, int nf3, T *d_f, cuDoubleComplex *d_a, T *d_fwkerhalf1, T *d_fwkerhalf2, T *d_fwkerhalf3, int ns) /* wrapper for approximation of Fourier series of real symmetric spreading kernel. Melody Shih 2/20/22 */ { int nout = max(max(nf1 / 2 + 1, nf2 / 2 + 1), nf3 / 2 + 1); dim3 threadsPerBlock(16, dim); dim3 numBlocks((nout + 16 - 1) / 16, 1); fseries_kernel_compute<<<numBlocks, threadsPerBlock>>>(nf1, nf2, nf3, d_f, d_a, d_fwkerhalf1, d_fwkerhalf2, d_fwkerhalf3, ns); return 0; } template <typename T> int setup_spreader_for_nufft(finufft_spread_opts &spopts, T eps, cufinufft_opts opts) // Set up the spreader parameters given eps, and pass across various nufft // options. Report status of setup_spreader. Barnett 10/30/17 { int ier = setup_spreader(spopts, eps, (T)opts.upsampfac, opts.gpu_kerevalmeth); spopts.pirange = 1; // could allow user control? return ier; } void set_nf_type12(CUFINUFFT_BIGINT ms, cufinufft_opts opts, finufft_spread_opts spopts, CUFINUFFT_BIGINT *nf, CUFINUFFT_BIGINT bs) // type 1 & 2 recipe for how to set 1d size of upsampled array, nf, given opts // and requested number of Fourier modes ms. { *nf = (CUFINUFFT_BIGINT)(opts.upsampfac * ms); if (*nf < 2 * spopts.nspread) *nf = 2 * spopts.nspread; // otherwise spread fails if (*nf < MAX_NF) { // otherwise will fail anyway if (opts.gpu_method == 4) // expensive at huge nf *nf = utils::next235beven(*nf, bs); else *nf = utils::next235beven(*nf, 1); } } template <typename T> void onedim_fseries_kernel(CUFINUFFT_BIGINT nf, T *fwkerhalf, finufft_spread_opts opts) /* Approximates exact Fourier series coeffs of cnufftspread's real symmetric kernel, directly via q-node quadrature on Euler-Fourier formula, exploiting narrowness of kernel. Uses phase winding for cheap eval on the regular freq grid. Note that this is also the Fourier transform of the non-periodized kernel. The FT definition is f(k) = int e^{-ikx} f(x) dx. The output has an overall prefactor of 1/h, which is needed anyway for the correction, and arises because the quadrature weights are scaled for grid units not x units. 
Inputs: nf - size of 1d uniform spread grid, must be even. opts - spreading opts object, needed to eval kernel (must be already set up) Outputs: fwkerhalf - real Fourier series coeffs from indices 0 to nf/2 inclusive, divided by h = 2pi/n. (should be allocated for at least nf/2+1 Ts) Compare onedim_dct_kernel which has same interface, but computes DFT of sampled kernel, not quite the same object. Barnett 2/7/17. openmp (since slow vs fftw in 1D large-N case) 3/3/18 Melody 2/20/22 separate into precomp & comp functions defined below. */ { T f[MAX_NQUAD]; std::complex<double> a[MAX_NQUAD]; onedim_fseries_kernel_precomp(nf, f, a, opts); onedim_fseries_kernel_compute(nf, f, a, fwkerhalf, opts); } /* Precomputation of approximations of exact Fourier series coeffs of cnufftspread's real symmetric kernel. Inputs: nf - size of 1d uniform spread grid, must be even. opts - spreading opts object, needed to eval kernel (must be already set up) Outputs: a - phase winding rates f - funciton values at quadrature nodes multiplied with quadrature weights (a, f are provided as the inputs of onedim_fseries_kernel_compute() defined below) */ template <typename T> void onedim_fseries_kernel_precomp(CUFINUFFT_BIGINT nf, T *f, std::complex<double> *a, finufft_spread_opts opts) { T J2 = opts.nspread / 2.0; // J/2, half-width of ker z-support // # quadr nodes in z (from 0 to J/2; reflections will be added)... int q = (int)(2 + 3.0 * J2); // not sure why so large? cannot exceed MAX_NQUAD double z[2 * MAX_NQUAD]; double w[2 * MAX_NQUAD]; finufft::quadrature::legendre_compute_glr(2 * q, z, w); // only half the nodes used, eg on (0,1) for (int n = 0; n < q; ++n) { // set up nodes z_n and vals f_n z[n] *= J2; // rescale nodes f[n] = J2 * w[n] * evaluate_kernel((T)z[n], opts); // vals & quadr wei a[n] = exp((T)(2.0 * M_PI) * std::complex<T>(0.0, 1.0) * (T)(nf / 2 - z[n]) / (T)nf); // phase winding rates } } template <typename T> void onedim_fseries_kernel_compute(CUFINUFFT_BIGINT nf, T *f, std::complex<double> *a, T *fwkerhalf, finufft_spread_opts opts) { T J2 = opts.nspread / 2.0; // J/2, half-width of ker z-support int q = (int)(2 + 3.0 * J2); // not sure why so large? 
cannot exceed MAX_NQUAD CUFINUFFT_BIGINT nout = nf / 2 + 1; // how many values we're writing to int nt = std::min(nout, MY_OMP_GET_MAX_THREADS()); // how many chunks std::vector<CUFINUFFT_BIGINT> brk(nt + 1); // start indices for each thread for (int t = 0; t <= nt; ++t) // split nout mode indices btw threads brk[t] = (CUFINUFFT_BIGINT)(0.5 + nout * t / (double)nt); #pragma omp parallel { int t = MY_OMP_GET_THREAD_NUM(); if (t < nt) { // could be nt < actual # threads std::complex<double> aj[MAX_NQUAD]; // phase rotator for this thread for (int n = 0; n < q; ++n) aj[n] = pow(a[n], (T)brk[t]); // init phase factors for chunk for (CUFINUFFT_BIGINT j = brk[t]; j < brk[t + 1]; ++j) { // loop along output array T x = 0.0; // accumulator for answer at this j for (int n = 0; n < q; ++n) { x += f[n] * 2 * real(aj[n]); // include the negative freq aj[n] *= a[n]; // wind the phases } fwkerhalf[j] = x; } } } } template void onedim_fseries_kernel_compute(CUFINUFFT_BIGINT nf, float *f, std::complex<double> *a, float *fwkerhalf, finufft_spread_opts opts); template void onedim_fseries_kernel_compute(CUFINUFFT_BIGINT nf, double *f, std::complex<double> *a, double *fwkerhalf, finufft_spread_opts opts); template int setup_spreader_for_nufft(finufft_spread_opts &spopts, float eps, cufinufft_opts opts); template int setup_spreader_for_nufft(finufft_spread_opts &spopts, double eps, cufinufft_opts opts); template void onedim_fseries_kernel_precomp(CUFINUFFT_BIGINT nf, float *f, std::complex<double> *a, finufft_spread_opts opts); template void onedim_fseries_kernel_precomp(CUFINUFFT_BIGINT nf, double *f, std::complex<double> *a, finufft_spread_opts opts); template int cufserieskernelcompute(int dim, int nf1, int nf2, int nf3, float *d_f, cuDoubleComplex *d_a, float *d_fwkerhalf1, float *d_fwkerhalf2, float *d_fwkerhalf3, int ns); template int cufserieskernelcompute(int dim, int nf1, int nf2, int nf3, double *d_f, cuDoubleComplex *d_a, double *d_fwkerhalf1, double *d_fwkerhalf2, double *d_fwkerhalf3, int ns); template void onedim_fseries_kernel(CUFINUFFT_BIGINT nf, float *fwkerhalf, finufft_spread_opts opts); template void onedim_fseries_kernel(CUFINUFFT_BIGINT nf, double *fwkerhalf, finufft_spread_opts opts); } // namespace common } // namespace cufinufft
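The host-side onedim_fseries_kernel_compute above relies on a phase-winding recurrence: rather than evaluating a[n]^j from scratch for every output index j, each chunk initializes a rotator aj[n] = a[n]^brk and then multiplies it by a[n] once per output. A minimal host-only sketch of that idea follows; it is independent of cufinufft, and the node count, weights and winding rates are made up for the demo.

#include <complex>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int q = 8;       // number of quadrature nodes (arbitrary for the demo)
    const int nout = 16;   // number of output coefficients (arbitrary for the demo)
    std::vector<double> f(q);
    std::vector<std::complex<double>> a(q);
    std::vector<std::complex<double>> aj(q, std::complex<double>(1.0, 0.0)); // rotators, a[n]^0
    for (int n = 0; n < q; ++n) {
        f[n] = 1.0 / (1.0 + n);                                    // stand-in for weight * kernel value
        a[n] = std::exp(std::complex<double>(0.0, 0.1 * (n + 1))); // stand-in phase winding rate
    }
    for (int j = 0; j < nout; ++j) {
        double direct = 0.0, wound = 0.0;
        for (int n = 0; n < q; ++n) {
            direct += f[n] * 2.0 * std::real(std::pow(a[n], double(j))); // recompute a[n]^j each time
            wound  += f[n] * 2.0 * std::real(aj[n]);                     // reuse the carried rotator
            aj[n] *= a[n];                                               // wind the phase for j+1
        }
        std::printf("j=%2d direct=%.12f wound=%.12f\n", j, direct, wound);
    }
    return 0;
}

Both columns agree to rounding, which is the point: the recurrence trades one complex power per (j, n) pair for a single complex multiply.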
afacbf0e48f0c0cef9b45f2d6e2c70201df7f0d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "matrix_mul.h" // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the device multiplication function __global__ void Muld(float*, float*, int, int, float*); // Host multiplication function // Compute C = A * B // hA is the height of A // wA is the width of A // wB is the width of B //export void Mul(float*, float*, int, int, int, float*); void Mul(float* A, float* B, int hA, int wA, int wB, float* C) { int size; // Load A and B to the device float* Ad; size = hA * wA * sizeof(float); hipMalloc((void**)&Ad, size); hipMemcpy(Ad, A, size, hipMemcpyHostToDevice); float* Bd; size = wA * wB * sizeof(float); hipMalloc((void**)&Bd, size); hipMemcpy(Bd, B, size, hipMemcpyHostToDevice); // Allocate C on the device float* Cd; size = hA * wB * sizeof(float); hipMalloc((void**)&Cd, size); // Compute the execution configuration assuming // the matrix dimensions are multiples of BLOCK_SIZE dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(wB / dimBlock.x, hA / dimBlock.y); // Launch the device computation hipLaunchKernelGGL(( Muld), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, wA, wB, Cd); // Read C from the device hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Free device memory hipFree(Ad); hipFree(Bd); hipFree(Cd); } // Device multiplication function called by Mul() // Compute C = A * B // wA is the width of A // wB is the width of B __global__ void Muld(float* A, float* B, int wA, int wB, float* C) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA*BLOCK_SIZE*by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // The element of the block sub-matrix that is computed // by the thread float Csub = 0; // Loop over all the sub-matrices of A and B required to // compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Shared memory for the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Shared memory for the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from global memory to shared memory; // each thread loads one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory; // each thread writes one element C[wB * BLOCK_SIZE * by + BLOCK_SIZE * bx + wA * ty + tx] = Csub; }
afacbf0e48f0c0cef9b45f2d6e2c70201df7f0d9.cu
#include <stdio.h> #include "matrix_mul.h" // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the device multiplication function __global__ void Muld(float*, float*, int, int, float*); // Host multiplication function // Compute C = A * B // hA is the height of A // wA is the width of A // wB is the width of B //export void Mul(float*, float*, int, int, int, float*); void Mul(float* A, float* B, int hA, int wA, int wB, float* C) { int size; // Load A and B to the device float* Ad; size = hA * wA * sizeof(float); cudaMalloc((void**)&Ad, size); cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice); float* Bd; size = wA * wB * sizeof(float); cudaMalloc((void**)&Bd, size); cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice); // Allocate C on the device float* Cd; size = hA * wB * sizeof(float); cudaMalloc((void**)&Cd, size); // Compute the execution configuration assuming // the matrix dimensions are multiples of BLOCK_SIZE dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(wB / dimBlock.x, hA / dimBlock.y); // Launch the device computation Muld<<<dimGrid, dimBlock>>>(Ad, Bd, wA, wB, Cd); // Read C from the device cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Free device memory cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); } // Device multiplication function called by Mul() // Compute C = A * B // wA is the width of A // wB is the width of B __global__ void Muld(float* A, float* B, int wA, int wB, float* C) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA*BLOCK_SIZE*by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // The element of the block sub-matrix that is computed // by the thread float Csub = 0; // Loop over all the sub-matrices of A and B required to // compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Shared memory for the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Shared memory for the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from global memory to shared memory; // each thread loads one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) Csub += As[ty][k] * Bs[k][tx]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory; // each thread writes one element C[wB * BLOCK_SIZE * by + BLOCK_SIZE * bx + wA * ty + tx] = Csub; }
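A hypothetical host driver for the Mul()/Muld() pair above; it is not part of the original file and assumes matrix_mul.h (not shown here) declares Mul with the signature used in this translation unit. Dimensions are kept as multiples of BLOCK_SIZE, as the grid configuration requires, and wA == wB is kept on purpose because the final store in Muld() uses wA as the row stride into C, which only matches C's actual layout when the two widths agree.

#include <cstdio>
#include <cstdlib>
#include "matrix_mul.h"

int main() {
    const int hA = 32, wA = 64, wB = 64;              // multiples of BLOCK_SIZE = 16
    float* A = (float*)malloc(hA * wA * sizeof(float));
    float* B = (float*)malloc(wA * wB * sizeof(float));
    float* C = (float*)malloc(hA * wB * sizeof(float));
    for (int i = 0; i < hA * wA; ++i) A[i] = 1.0f;    // all-ones inputs make every
    for (int i = 0; i < wA * wB; ++i) B[i] = 1.0f;    // entry of C equal to wA
    Mul(A, B, hA, wA, wB, C);
    printf("C[0] = %f (expected %d)\n", C[0], wA);
    free(A);
    free(B);
    free(C);
    return 0;
}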
d825436695ac1fa0bafdacd94dcd35df603c4c62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2014 BVLC and contributors. #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/math_functions.hpp" using std::max; namespace caffe { template <typename Dtype> __global__ void kernel_get_max(const int num, const int dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { Dtype maxval = -FLT_MAX; for (int i = 0; i < dim; ++i) { maxval = max(data[index * dim + i], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_softmax_div(const int num, const int dim, const Dtype* scale, Dtype* data) { CUDA_KERNEL_LOOP(index, num * dim) { int n = index / dim; data[index] /= scale[n]; } } template <typename Dtype> __global__ void kernel_exp(const int num, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { out[index] = exp(data[index]); } } template <typename Dtype> void MultiSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { Forward_cpu(bottom,top); } // TODO(Yangqing): implement the GPU version of softmax. template <typename Dtype> void MultiSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { } INSTANTIATE_CLASS(MultiSoftmaxLayer); } // namespace caffe
d825436695ac1fa0bafdacd94dcd35df603c4c62.cu
// Copyright 2014 BVLC and contributors. #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/math_functions.hpp" using std::max; namespace caffe { template <typename Dtype> __global__ void kernel_get_max(const int num, const int dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { Dtype maxval = -FLT_MAX; for (int i = 0; i < dim; ++i) { maxval = max(data[index * dim + i], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_softmax_div(const int num, const int dim, const Dtype* scale, Dtype* data) { CUDA_KERNEL_LOOP(index, num * dim) { int n = index / dim; data[index] /= scale[n]; } } template <typename Dtype> __global__ void kernel_exp(const int num, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { out[index] = exp(data[index]); } } template <typename Dtype> void MultiSoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { Forward_cpu(bottom,top); } // TODO(Yangqing): implement the GPU version of softmax. template <typename Dtype> void MultiSoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { } INSTANTIATE_CLASS(MultiSoftmaxLayer); } // namespace caffe
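Forward_gpu above simply falls back to the CPU path, and the three kernels in this file (row max, elementwise exp, divide by a per-row scale) are the building blocks a GPU softmax would use. The standalone sketch below, written outside of Caffe and with plain float instead of the Dtype template, shows one way those pieces combine into a numerically stable row-wise softmax (shift by the row max, exponentiate, divide by the row sum); kernel granularity and launch sizes are choices made for the demo, not taken from Caffe.

#include <cfloat>
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void row_max(int num, int dim, const float* data, float* out) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
        float m = -FLT_MAX;
        for (int d = 0; d < dim; ++d) m = fmaxf(m, data[i * dim + d]);
        out[i] = m;
    }
}

__global__ void exp_shift_and_sum(int num, int dim, float* data, const float* rowmax, float* rowsum) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
        float s = 0.f;
        for (int d = 0; d < dim; ++d) {
            float e = expf(data[i * dim + d] - rowmax[i]);   // shift by the row max for stability
            data[i * dim + d] = e;
            s += e;
        }
        rowsum[i] = s;
    }
}

__global__ void div_by_rowsum(int num, int dim, float* data, const float* rowsum) {
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num * dim; idx += blockDim.x * gridDim.x)
        data[idx] /= rowsum[idx / dim];                      // same n = index / dim trick as kernel_softmax_div
}

int main() {
    const int num = 2, dim = 4;
    float h[num * dim] = {1.f, 2.f, 3.f, 4.f, -1.f, 0.f, 1.f, 2.f};
    float *d, *dmax, *dsum;
    cudaMalloc(&d, sizeof(h));
    cudaMalloc(&dmax, num * sizeof(float));
    cudaMalloc(&dsum, num * sizeof(float));
    cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice);
    row_max<<<1, 64>>>(num, dim, d, dmax);
    exp_shift_and_sum<<<1, 64>>>(num, dim, d, dmax, dsum);
    div_by_rowsum<<<1, 64>>>(num, dim, d, dsum);
    cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
    for (int i = 0; i < num * dim; ++i) printf("%0.4f%s", h[i], (i + 1) % dim ? " " : "\n");
    cudaFree(d); cudaFree(dmax); cudaFree(dsum);
    return 0;
}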
56f0d3b6bbbd119707c6976f2a9043319c51a480.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <complex> #include <cufinufft_eitherprec.h> #include <profile.h> #include "../contrib/utils.h" using namespace std; int main(int argc, char* argv[]) { int N1, N2, M; int ntransf, maxbatchsize; if (argc<4) { fprintf(stderr, "Usage: cufinufft2d2many_test method N1 N2 [ntransf [maxbatchsize [M [tol]]]]\n" "Arguments:\n" " method: One of\n" " 1: nupts driven, or\n" " 2: sub-problem.\n" " N1, N2: The size of the 2D array.\n" " ntransf: Number of inputs (default 2 ^ 27 / (N1 * N2)).\n" " maxbatchsize: Number of simultaneous transforms (or 0 for default).\n" " M: The number of non-uniform points (default N1 * N2).\n" " tol: NUFFT tolerance (default 1e-6).\n"); return 1; } double w; int method; sscanf(argv[1],"%d",&method); sscanf(argv[2],"%lf",&w); N1 = (int)w; // so can read 1e6 right! sscanf(argv[3],"%lf",&w); N2 = (int)w; // so can read 1e6 right! M = 2*N1*N2;// let density always be 2 ntransf = pow(2,28)/M; if(argc>4){ sscanf(argv[4],"%d",&ntransf); } maxbatchsize = 0; // have cufinufft choose the default if(argc>5){ sscanf(argv[5],"%d",&maxbatchsize); } if(argc>6){ sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right! } FLT tol=1e-6; if(argc>7){ sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right! } int iflag=1; cout<<scientific<<setprecision(3); int ier; printf("#modes = %d, #inputs = %d, #NUpts = %d\n", N1*N2, ntransf, M); FLT *x, *y; CPX *c, *fk; #if 1 hipHostMalloc(&x, M*sizeof(FLT)); hipHostMalloc(&y, M*sizeof(FLT)); hipHostMalloc(&c, ntransf*M*sizeof(CPX)); hipHostMalloc(&fk,ntransf*N1*N2*sizeof(CPX)); #else x = (FLT*) malloc(M*sizeof(FLT)); y = (FLT*) malloc(M*sizeof(FLT)); c = (CPX*) malloc(ntransf*M*sizeof(CPX)); fk = (CPX*) malloc(ntransf*N1*N2*sizeof(CPX)); #endif FLT *d_x, *d_y; CUCPX *d_c, *d_fk; checkCudaErrors(hipMalloc(&d_x,M*sizeof(FLT))); checkCudaErrors(hipMalloc(&d_y,M*sizeof(FLT))); checkCudaErrors(hipMalloc(&d_c,ntransf*M*sizeof(CUCPX))); checkCudaErrors(hipMalloc(&d_fk,ntransf*N1*N2*sizeof(CUCPX))); // Making data for (int i = 0; i < M; i++) { x[i] = M_PI*randm11();// x in [-pi,pi) y[i] = M_PI*randm11(); } for(int i=0; i<ntransf*N1*N2; i++){ fk[i].real(randm11()); fk[i].imag(randm11()); } checkCudaErrors(hipMemcpy(d_x,x,M*sizeof(FLT),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_y,y,M*sizeof(FLT),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_fk,fk,N1*N2*ntransf*sizeof(CUCPX),hipMemcpyHostToDevice)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; double totaltime = 0; // warm up CUFFT (is slow, takes around 0.2 sec... ) hipEventRecord(start); { int nf1=1; hipfftHandle fftplan; hipfftPlan1d(&fftplan,nf1,CUFFT_TYPE,1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds/1000); // now to the test... 
CUFINUFFT_PLAN dplan; int dim = 2; int type = 2; // Here we setup our own opts, for gpu_method and gpu_kerevalmeth cufinufft_opts opts; ier=CUFINUFFT_DEFAULT_OPTS(type, dim, &opts); if(ier!=0){ printf("err %d: CUFINUFFT_DEFAULT_OPTS\n", ier); return ier; } opts.gpu_method=method; opts.gpu_kerevalmeth=1; int nmodes[3]; nmodes[0] = N1; nmodes[1] = N2; nmodes[2] = 1; hipEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_plan",2); ier=CUFINUFFT_MAKEPLAN(type, dim, nmodes, iflag, ntransf, tol, maxbatchsize, &dplan, &opts); if (ier!=0){ printf("err: cufinufft2d_plan\n"); return ier; } } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds/1000); hipEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_setNUpts",3); ier=CUFINUFFT_SETPTS(M, d_x, d_y, NULL, 0, NULL, NULL, NULL, dplan); if (ier!=0){ printf("err: cufinufft2d_setNUpts\n"); return ier; } } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds/1000); hipEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_exec",4); ier=CUFINUFFT_EXECUTE(d_c, d_fk, dplan); if (ier!=0){ printf("err: cufinufft2d2_exec\n"); return ier; } } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; float exec_ms = milliseconds; printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds/1000); hipEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_destroy",5); ier=CUFINUFFT_DESTROY(dplan); if(ier!=0){ printf("err %d: cufinufft2d2_destroy\n", ier); return ier; } } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds/1000); checkCudaErrors(hipMemcpy(c,d_c,M*ntransf*sizeof(CUCPX),hipMemcpyDeviceToHost)); CPX* fkstart; CPX* cstart; int t = ntransf-1; fkstart = fk + t*N1*N2; cstart = c + t*M; int jt = M/2; // check arbitrary choice of one targ pt CPX J = IMA*(FLT)iflag; CPX ct = CPX(0,0); int m=0; for (int m2=-(N2/2); m2<=(N2-1)/2; ++m2) // loop in correct order over F for (int m1=-(N1/2); m1<=(N1-1)/2; ++m1) ct += fkstart[m++] * exp(J*(m1*x[jt] + m2*y[jt])); // crude direct printf("[gpu ] %dth data one targ: rel err in c[%ld] is %.3g\n",(int)t, (int64_t)jt,abs(cstart[jt]-ct)/infnorm(M,c)); printf("[totaltime] %.3g us, speed %.3g NUpts/s\n", totaltime*1000, M*ntransf/totaltime*1000); printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n",M*ntransf/exec_ms*1000); hipHostFree(x); hipHostFree(y); hipHostFree(c); hipHostFree(fk); return 0; }
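The "crude direct" check at the end of main() above evaluates the type-2 transform definition at one nonuniform point: with iflag = +1, c_j is the sum over centered mode indices (m1, m2) of fk[m1, m2] * exp(+i*(m1*x_j + m2*y_j)). A tiny host-only version of that sum follows, with made-up coefficients and a made-up target point, just to make the mode ordering and sign convention concrete.

#include <complex>
#include <cstdio>
#include <vector>

int main() {
    const int N1 = 4, N2 = 4;                              // tiny mode grid for the demo
    std::vector<std::complex<double>> fk(N1 * N2);
    for (int i = 0; i < N1 * N2; ++i)
        fk[i] = std::complex<double>(0.1 * i, -0.05 * i);  // arbitrary Fourier coefficients
    const double xj = 0.7, yj = -1.3;                      // one nonuniform target in [-pi, pi)
    std::complex<double> c(0.0, 0.0);
    int m = 0;
    for (int m2 = -(N2 / 2); m2 <= (N2 - 1) / 2; ++m2)     // same ordering as the check above:
        for (int m1 = -(N1 / 2); m1 <= (N1 - 1) / 2; ++m1) // m1 fastest, centered index ranges
            c += fk[m++] * std::polar(1.0, m1 * xj + m2 * yj);  // exp(+i*(m1*x + m2*y))
    std::printf("direct c = (%.6f, %.6f)\n", c.real(), c.imag());
    return 0;
}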
56f0d3b6bbbd119707c6976f2a9043319c51a480.cu
#include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <complex> #include <cufinufft_eitherprec.h> #include <profile.h> #include "../contrib/utils.h" using namespace std; int main(int argc, char* argv[]) { int N1, N2, M; int ntransf, maxbatchsize; if (argc<4) { fprintf(stderr, "Usage: cufinufft2d2many_test method N1 N2 [ntransf [maxbatchsize [M [tol]]]]\n" "Arguments:\n" " method: One of\n" " 1: nupts driven, or\n" " 2: sub-problem.\n" " N1, N2: The size of the 2D array.\n" " ntransf: Number of inputs (default 2 ^ 27 / (N1 * N2)).\n" " maxbatchsize: Number of simultaneous transforms (or 0 for default).\n" " M: The number of non-uniform points (default N1 * N2).\n" " tol: NUFFT tolerance (default 1e-6).\n"); return 1; } double w; int method; sscanf(argv[1],"%d",&method); sscanf(argv[2],"%lf",&w); N1 = (int)w; // so can read 1e6 right! sscanf(argv[3],"%lf",&w); N2 = (int)w; // so can read 1e6 right! M = 2*N1*N2;// let density always be 2 ntransf = pow(2,28)/M; if(argc>4){ sscanf(argv[4],"%d",&ntransf); } maxbatchsize = 0; // have cufinufft choose the default if(argc>5){ sscanf(argv[5],"%d",&maxbatchsize); } if(argc>6){ sscanf(argv[6],"%lf",&w); M = (int)w; // so can read 1e6 right! } FLT tol=1e-6; if(argc>7){ sscanf(argv[7],"%lf",&w); tol = (FLT)w; // so can read 1e6 right! } int iflag=1; cout<<scientific<<setprecision(3); int ier; printf("#modes = %d, #inputs = %d, #NUpts = %d\n", N1*N2, ntransf, M); FLT *x, *y; CPX *c, *fk; #if 1 cudaMallocHost(&x, M*sizeof(FLT)); cudaMallocHost(&y, M*sizeof(FLT)); cudaMallocHost(&c, ntransf*M*sizeof(CPX)); cudaMallocHost(&fk,ntransf*N1*N2*sizeof(CPX)); #else x = (FLT*) malloc(M*sizeof(FLT)); y = (FLT*) malloc(M*sizeof(FLT)); c = (CPX*) malloc(ntransf*M*sizeof(CPX)); fk = (CPX*) malloc(ntransf*N1*N2*sizeof(CPX)); #endif FLT *d_x, *d_y; CUCPX *d_c, *d_fk; checkCudaErrors(cudaMalloc(&d_x,M*sizeof(FLT))); checkCudaErrors(cudaMalloc(&d_y,M*sizeof(FLT))); checkCudaErrors(cudaMalloc(&d_c,ntransf*M*sizeof(CUCPX))); checkCudaErrors(cudaMalloc(&d_fk,ntransf*N1*N2*sizeof(CUCPX))); // Making data for (int i = 0; i < M; i++) { x[i] = M_PI*randm11();// x in [-pi,pi) y[i] = M_PI*randm11(); } for(int i=0; i<ntransf*N1*N2; i++){ fk[i].real(randm11()); fk[i].imag(randm11()); } checkCudaErrors(cudaMemcpy(d_x,x,M*sizeof(FLT),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_y,y,M*sizeof(FLT),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_fk,fk,N1*N2*ntransf*sizeof(CUCPX),cudaMemcpyHostToDevice)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; double totaltime = 0; // warm up CUFFT (is slow, takes around 0.2 sec... ) cudaEventRecord(start); { int nf1=1; cufftHandle fftplan; cufftPlan1d(&fftplan,nf1,CUFFT_TYPE,1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds/1000); // now to the test... 
CUFINUFFT_PLAN dplan; int dim = 2; int type = 2; // Here we setup our own opts, for gpu_method and gpu_kerevalmeth cufinufft_opts opts; ier=CUFINUFFT_DEFAULT_OPTS(type, dim, &opts); if(ier!=0){ printf("err %d: CUFINUFFT_DEFAULT_OPTS\n", ier); return ier; } opts.gpu_method=method; opts.gpu_kerevalmeth=1; int nmodes[3]; nmodes[0] = N1; nmodes[1] = N2; nmodes[2] = 1; cudaEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_plan",2); ier=CUFINUFFT_MAKEPLAN(type, dim, nmodes, iflag, ntransf, tol, maxbatchsize, &dplan, &opts); if (ier!=0){ printf("err: cufinufft2d_plan\n"); return ier; } } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds/1000); cudaEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_setNUpts",3); ier=CUFINUFFT_SETPTS(M, d_x, d_y, NULL, 0, NULL, NULL, NULL, dplan); if (ier!=0){ printf("err: cufinufft2d_setNUpts\n"); return ier; } } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds/1000); cudaEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_exec",4); ier=CUFINUFFT_EXECUTE(d_c, d_fk, dplan); if (ier!=0){ printf("err: cufinufft2d2_exec\n"); return ier; } } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; float exec_ms = milliseconds; printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds/1000); cudaEventRecord(start); { PROFILE_CUDA_GROUP("cufinufft2d_destroy",5); ier=CUFINUFFT_DESTROY(dplan); if(ier!=0){ printf("err %d: cufinufft2d2_destroy\n", ier); return ier; } } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds/1000); checkCudaErrors(cudaMemcpy(c,d_c,M*ntransf*sizeof(CUCPX),cudaMemcpyDeviceToHost)); CPX* fkstart; CPX* cstart; int t = ntransf-1; fkstart = fk + t*N1*N2; cstart = c + t*M; int jt = M/2; // check arbitrary choice of one targ pt CPX J = IMA*(FLT)iflag; CPX ct = CPX(0,0); int m=0; for (int m2=-(N2/2); m2<=(N2-1)/2; ++m2) // loop in correct order over F for (int m1=-(N1/2); m1<=(N1-1)/2; ++m1) ct += fkstart[m++] * exp(J*(m1*x[jt] + m2*y[jt])); // crude direct printf("[gpu ] %dth data one targ: rel err in c[%ld] is %.3g\n",(int)t, (int64_t)jt,abs(cstart[jt]-ct)/infnorm(M,c)); printf("[totaltime] %.3g us, speed %.3g NUpts/s\n", totaltime*1000, M*ntransf/totaltime*1000); printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n",M*ntransf/exec_ms*1000); cudaFreeHost(x); cudaFreeHost(y); cudaFreeHost(c); cudaFreeHost(fk); return 0; }
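Every stage in the test above is timed with the same cudaEvent_t pattern: record a start event, do the work, record a stop event, synchronize on the stop event, then read the elapsed milliseconds. The minimal standalone sketch below isolates just that pattern; the dummy kernel exists only to have something to time.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_kernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = sinf(x[i]) * cosf(x[i]);
}

int main() {
    const int n = 1 << 20;
    float* d;
    cudaMalloc(&d, n * sizeof(float));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                       // enqueue the start marker
    busy_kernel<<<(n + 255) / 256, 256>>>(d, n);  // the work being timed
    cudaEventRecord(stop);                        // enqueue the stop marker
    cudaEventSynchronize(stop);                   // wait until the stop event has completed
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel took %.3f ms\n", ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}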
63185c573001cfab9431bb384dbc0838923240d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "random_hip.cuh" #include "random_gen.cuh" #include "catboost/cuda/cuda_lib/kernel/arch.cuh" namespace NKernel { __global__ void PoissonRandImpl(ui64* seeds, ui32 seedSize, const float* alpha, int* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextPoisson(&s, alpha[i]); seeds[i] = s; i += gridDim.x * blockDim.x; } } void PoissonRand(ui64* seeds, ui32 size, const float* alphas, int* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( PoissonRandImpl), dim3(numBlocks),dim3(blockSize), 0, stream, seeds, size, alphas, result); } __global__ void GaussianRandImpl(ui64* seeds, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextNormal(&s); seeds[i] = s; i += gridDim.x * blockDim.x; } } void GaussianRand(ui64* seeds, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( GaussianRandImpl), dim3(numBlocks),dim3(blockSize), 0, stream, seeds, size, result); } __global__ void UniformRandImpl(ui64* seeds, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextUniform(&s); seeds[i] = s; i += gridDim.x * blockDim.x; } } void UniformRand(ui64* seeds, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( UniformRandImpl), dim3(numBlocks), dim3(blockSize), 0, stream, seeds, size, result); } __global__ void GammaRandImpl(ui64* seeds, const float* alphas, const float* scale, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextGamma(&s, alphas[i], scale[i]); seeds[i] = s; i += gridDim.x * blockDim.x; } } void GammaRand(ui64* seeds, const float* alphas, const float* scale, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( GammaRandImpl), dim3(numBlocks), dim3(blockSize), 0, stream, seeds, alphas, scale, size, result); } __global__ void BetaRandImpl(ui64* seeds, const float* alphas, const float* betas, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextBeta(&s, alphas[i], betas[i]); seeds[i] = s; i += gridDim.x * blockDim.x; } } void BetaRand(ui64* seeds, const float* alphas, const float* betas, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); hipLaunchKernelGGL(( BetaRandImpl), dim3(numBlocks), dim3(blockSize), 0, stream, seeds, alphas, betas, size, result); } }
63185c573001cfab9431bb384dbc0838923240d3.cu
#include "random.cuh" #include "random_gen.cuh" #include "catboost/cuda/cuda_lib/kernel/arch.cuh" namespace NKernel { __global__ void PoissonRandImpl(ui64* seeds, ui32 seedSize, const float* alpha, int* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextPoisson(&s, alpha[i]); seeds[i] = s; i += gridDim.x * blockDim.x; } } void PoissonRand(ui64* seeds, ui32 size, const float* alphas, int* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); PoissonRandImpl<<<numBlocks,blockSize, 0, stream>>>(seeds, size, alphas, result); } __global__ void GaussianRandImpl(ui64* seeds, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextNormal(&s); seeds[i] = s; i += gridDim.x * blockDim.x; } } void GaussianRand(ui64* seeds, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); GaussianRandImpl<<<numBlocks,blockSize, 0, stream>>>(seeds, size, result); } __global__ void UniformRandImpl(ui64* seeds, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextUniform(&s); seeds[i] = s; i += gridDim.x * blockDim.x; } } void UniformRand(ui64* seeds, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); UniformRandImpl<<<numBlocks, blockSize, 0, stream>>>(seeds, size, result); } __global__ void GammaRandImpl(ui64* seeds, const float* alphas, const float* scale, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextGamma(&s, alphas[i], scale[i]); seeds[i] = s; i += gridDim.x * blockDim.x; } } void GammaRand(ui64* seeds, const float* alphas, const float* scale, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); GammaRandImpl<<<numBlocks, blockSize, 0, stream>>>(seeds, alphas, scale, size, result); } __global__ void BetaRandImpl(ui64* seeds, const float* alphas, const float* betas, ui32 seedSize, float* result) { ui32 i = blockIdx.x * blockDim.x + threadIdx.x; while (i < seedSize) { ui64 s = seeds[i]; result[i] = NextBeta(&s, alphas[i], betas[i]); seeds[i] = s; i += gridDim.x * blockDim.x; } } void BetaRand(ui64* seeds, const float* alphas, const float* betas, ui32 size, float* result, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = min((size + blockSize - 1) / blockSize, TArchProps::MaxBlockCount()); BetaRandImpl<<<numBlocks, blockSize, 0, stream>>>(seeds, alphas, betas, size, result); } }
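Each kernel in this file uses the same grid-stride idiom: a thread starts at its global index and advances by gridDim.x * blockDim.x until it runs off the end, so any launch geometry covers any array length. A minimal sketch of the idiom, outside CatBoost and with a trivial fill instead of random number generation:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void FillImpl(float* dst, unsigned int size, float value) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < size) {
        dst[i] = value;
        i += gridDim.x * blockDim.x;   // stride over the whole grid, as the kernels above do
    }
}

int main() {
    const unsigned int size = 1u << 20;
    float* d;
    cudaMalloc(&d, size * sizeof(float));
    const unsigned int blockSize = 256;
    const unsigned int numBlocks = 64;             // deliberately far fewer threads than elements
    FillImpl<<<numBlocks, blockSize>>>(d, size, 1.0f);
    float last = 0.f;
    cudaMemcpy(&last, d + size - 1, sizeof(float), cudaMemcpyDeviceToHost);
    printf("last element = %f\n", last);           // still written despite the small grid
    cudaFree(d);
    return 0;
}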
3fc67ea79a085b1831785a4cedafd7057032d09f.hip
// !!! This is a file automatically generated by hipify!!! // Automatically generated CU for E:\GitHub\NeuroGPU\Figures\FigureS3_passive_mew./runModel.hoc #include <stdio.h> #include <stdlib.h> #include <math.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "AllModels_hip.cuh" // Universals: #define PI (3.1415927f) #define R (8.31441f) #define FARADAY (96485.309f) #define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY) #define DEF_vrest -65. #define DEF_nai 10. #define DEF_nao 140. #define DEF_ena (115. + DEF_vrest) #define DEF_ki 54.4 #define DEF_ko 2.5 #define DEF_ek (-12. + DEF_vrest) #include <math.h> #define DEF_cai 5.e-5 #define DEF_cao 2. #define DEF_eca 12.5 *log(DEF_cao / DEF_cai) // GGlobals: #define celsius (6.3) #define stoprun (0.0) #define clamp_resist (0.001) #define secondorder (0.0) // NGlobals: // Declarations: float Cunernst(float ci,float co, float z) { if (z == 0) { return 0.; } if (ci <= 0.) { return 1e6; }else if (co <= 0.) { return -1e6; }else{ return ktf/z*log(co/ci); } } // Functions: // Procedures: // Inits: __device__ void CuInitModel_pas(MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas){ }; // Derivs: // Breaks: __device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas) { MYFTYPE; MYFTYPE i; i = g_pas * ( v - e_pas ) ; i = i; sumCurrents+= i; sumConductivity+= g_pas; };
3fc67ea79a085b1831785a4cedafd7057032d09f.cu
// Automatically generated CU for E:\GitHub\NeuroGPU\Figures\FigureS3_passive_mew./runModel.hoc #include <stdio.h> #include <stdlib.h> #include <math.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "AllModels.cuh" // Universals: #define PI (3.1415927f) #define R (8.31441f) #define FARADAY (96485.309f) #define ktf (1000.*8.3134*(celsius + 273.15)/FARADAY) #define DEF_vrest -65. #define DEF_nai 10. #define DEF_nao 140. #define DEF_ena (115. + DEF_vrest) #define DEF_ki 54.4 #define DEF_ko 2.5 #define DEF_ek (-12. + DEF_vrest) #include <math.h> #define DEF_cai 5.e-5 #define DEF_cao 2. #define DEF_eca 12.5 *log(DEF_cao / DEF_cai) // GGlobals: #define celsius (6.3) #define stoprun (0.0) #define clamp_resist (0.001) #define secondorder (0.0) // NGlobals: // Declarations: float Cunernst(float ci,float co, float z) { if (z == 0) { return 0.; } if (ci <= 0.) { return 1e6; }else if (co <= 0.) { return -1e6; }else{ return ktf/z*log(co/ci); } } // Functions: // Procedures: // Inits: __device__ void CuInitModel_pas(MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas){ }; // Derivs: // Breaks: __device__ void CuBreakpointModel_pas(MYSECONDFTYPE &sumCurrents, MYFTYPE &sumConductivity, MYFTYPE v,MYFTYPE g_pas,MYFTYPE e_pas) { MYFTYPE; MYFTYPE i; i = g_pas * ( v - e_pas ) ; i = i; sumCurrents+= i; sumConductivity+= g_pas; };
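Two pieces of physics appear in this generated file: the Nernst potential ktf/z * log(co/ci) in Cunernst(), and the passive leak current i = g_pas * (v - e_pas) in CuBreakpointModel_pas(). The host-only check below evaluates both with the DEF_* potassium concentrations from this file and an arbitrary g_pas, purely to show the magnitudes involved (NEURON-style units: mV, S/cm^2, mA/cm^2).

#include <cmath>
#include <cstdio>

int main() {
    const double celsius_deg = 6.3;
    const double faraday = 96485.309;
    const double ktf = 1000.0 * 8.3134 * (celsius_deg + 273.15) / faraday;  // ~24 mV at 6.3 C
    const double ki = 54.4, ko = 2.5, z = 1.0;            // DEF_ki, DEF_ko for K+
    printf("E_K   = %.2f mV\n", ktf / z * std::log(ko / ki));
    const double g_pas = 1e-4, e_pas = -70.0, v = -65.0;  // g_pas is an arbitrary demo value
    printf("i_pas = %.6f mA/cm^2\n", g_pas * (v - e_pas));
    return 0;
}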
d42ac4a9e8cf969e9b342eaf4267e62bd4be1c11.hip
// !!! This is a file automatically generated by hipify!!! #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #define N 300000 #define NSTREAM 8 // function for checking the CUDA runtime API results. inline void checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { printf_s("Error: %s : %d", __FILE__, __LINE__); printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result)); exit(1); } #endif } __global__ void kernel_1() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } int main(int argc, char **argv) { int n_streams = NSTREAM; int isize = 1; int iblock = 1; int bigcase = 0; // get argument from command line if (argc > 1) n_streams = atoi(argv[1]); if (argc > 2) bigcase = atoi(argv[2]); float elapsed_time; int dev = 0; hipDeviceProp_t deviceProp; checkCuda(hipGetDeviceProperties(&deviceProp, dev)); printf_s("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams); checkCuda(hipSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf_s("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n"); printf_s("> CUDA kernel runs will be serialized\n"); } else { printf_s("> GPU does not support HyperQ\n"); printf_s("> CUDA kernel runs will have limited concurrency\n"); } } printf_s("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate and initialize an array of stream handles hipStream_t *streams = (hipStream_t *)malloc(n_streams * sizeof(hipStream_t)); for (int i = 0; i < n_streams; i++) { checkCuda(hipStreamCreate(&(streams[i]))); } // run kernel with more threads if (bigcase == 1) { iblock = 512; isize = 1 << 20; } // set up execution configuration dim3 block(iblock); dim3 grid(isize / iblock); printf_s("> grid %d block %d\n", grid.x, block.x); // creat events hipEvent_t start, stop; checkCuda(hipEventCreate(&start)); checkCuda(hipEventCreate(&stop)); // record start event checkCuda(hipEventRecord(start, 0)); // dispatch job with depth first ordering omp_set_num_threads(n_streams); #pragma omp parallel { int i = omp_get_thread_num(); hipLaunchKernelGGL(( kernel_1) , dim3(grid), dim3(block), 0, streams[i] , ); hipLaunchKernelGGL(( kernel_2) , dim3(grid), dim3(block), 0, streams[i] , ); hipLaunchKernelGGL(( kernel_3) , dim3(grid), dim3(block), 0, streams[i] , ); hipLaunchKernelGGL(( kernel_4) , dim3(grid), dim3(block), 0, streams[i] , ); } // record stop event checkCuda(hipEventRecord(stop, 0)); checkCuda(hipEventSynchronize(stop)); // calculate elapsed time checkCuda(hipEventElapsedTime(&elapsed_time, start, stop)); printf_s("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f); // release all stream for (int i = 0; i < n_streams; i++) { checkCuda(hipStreamDestroy(streams[i])); } free(streams); // destroy events checkCuda(hipEventDestroy(start)); checkCuda(hipEventDestroy(stop)); // reset device 
    checkCuda(hipDeviceReset());

    return EXIT_SUCCESS;
}
d42ac4a9e8cf969e9b342eaf4267e62bd4be1c11.cu
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #define N 300000 #define NSTREAM 8 // function for checking the CUDA runtime API results. inline void checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { printf_s("Error: %s : %d", __FILE__, __LINE__); printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result)); exit(1); } #endif } __global__ void kernel_1() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_2() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_3() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } __global__ void kernel_4() { double sum = 0.0; for (int i = 0; i < N; i++) { sum = sum + tan(0.1) * tan(0.1); } } int main(int argc, char **argv) { int n_streams = NSTREAM; int isize = 1; int iblock = 1; int bigcase = 0; // get argument from command line if (argc > 1) n_streams = atoi(argv[1]); if (argc > 2) bigcase = atoi(argv[2]); float elapsed_time; int dev = 0; cudaDeviceProp deviceProp; checkCuda(cudaGetDeviceProperties(&deviceProp, dev)); printf_s("> Using Device %d: %s with num_streams=%d\n", dev, deviceProp.name, n_streams); checkCuda(cudaSetDevice(dev)); // check if device support hyper-q if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5)) { if (deviceProp.concurrentKernels == 0) { printf_s("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n"); printf_s("> CUDA kernel runs will be serialized\n"); } else { printf_s("> GPU does not support HyperQ\n"); printf_s("> CUDA kernel runs will have limited concurrency\n"); } } printf_s("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // Allocate and initialize an array of stream handles cudaStream_t *streams = (cudaStream_t *)malloc(n_streams * sizeof(cudaStream_t)); for (int i = 0; i < n_streams; i++) { checkCuda(cudaStreamCreate(&(streams[i]))); } // run kernel with more threads if (bigcase == 1) { iblock = 512; isize = 1 << 20; } // set up execution configuration dim3 block(iblock); dim3 grid(isize / iblock); printf_s("> grid %d block %d\n", grid.x, block.x); // creat events cudaEvent_t start, stop; checkCuda(cudaEventCreate(&start)); checkCuda(cudaEventCreate(&stop)); // record start event checkCuda(cudaEventRecord(start, 0)); // dispatch job with depth first ordering omp_set_num_threads(n_streams); #pragma omp parallel { int i = omp_get_thread_num(); kernel_1 <<<grid, block, 0, streams[i] >>>(); kernel_2 <<<grid, block, 0, streams[i] >>>(); kernel_3 <<<grid, block, 0, streams[i] >>>(); kernel_4 <<<grid, block, 0, streams[i] >>>(); } // record stop event checkCuda(cudaEventRecord(stop, 0)); checkCuda(cudaEventSynchronize(stop)); // calculate elapsed time checkCuda(cudaEventElapsedTime(&elapsed_time, start, stop)); printf_s("Measured time for parallel execution = %.3fs\n", elapsed_time / 1000.0f); // release all stream for (int i = 0; i < n_streams; i++) { checkCuda(cudaStreamDestroy(streams[i])); } free(streams); // destroy events checkCuda(cudaEventDestroy(start)); checkCuda(cudaEventDestroy(stop)); // reset device checkCuda(cudaDeviceReset()); return EXIT_SUCCESS; }
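The OpenMP section above issues one chain of kernels per host thread, each into its own stream; whether they actually overlap depends on the device's concurrent-kernel support, which is what the capability checks are probing. A reduced sketch of the same dispatch pattern without OpenMP, one launch per stream from a single host thread:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void spin_kernel(double* out) {
    double sum = 0.0;
    for (int i = 0; i < 100000; i++)
        sum = sum + tan(0.1) * tan(0.1);
    *out = sum;                     // store the result so the loop is not optimized away
}

int main() {
    const int n_streams = 4;
    cudaStream_t streams[n_streams];
    double* d_out;
    cudaMalloc(&d_out, n_streams * sizeof(double));
    for (int i = 0; i < n_streams; i++)
        cudaStreamCreate(&streams[i]);
    for (int i = 0; i < n_streams; i++)
        spin_kernel<<<1, 1, 0, streams[i]>>>(d_out + i);  // each launch targets its own stream
    cudaDeviceSynchronize();                              // wait for every stream to drain
    for (int i = 0; i < n_streams; i++)
        cudaStreamDestroy(streams[i]);
    cudaFree(d_out);
    printf("issued %d kernels into %d streams\n", n_streams, n_streams);
    return 0;
}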
80989a3be28bbc8f6058116c12af76ef69cdf942.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <ATen/hip/ApplyGridUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/native/hip/Loops.cuh> namespace at { namespace native { namespace { void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; auto negval = negval_.to<opmath_t>(); gpu_kernel(iter, [negval] GPU_LAMBDA(scalar_t a) -> scalar_t { opmath_t aop = static_cast<opmath_t>(a); return aop > opmath_t(0) ? aop : aop * negval; }); }); } void leaky_relu_backward_kernel( TensorIteratorBase& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; auto negval = negval_.to<opmath_t>(); gpu_kernel( iter, [negval] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { opmath_t aop = static_cast<opmath_t>(a); opmath_t bop = static_cast<opmath_t>(b); return aop > opmath_t(0) ? bop : bop * negval; }); }); } } // namespace REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); } // namespace native } // namespace at
80989a3be28bbc8f6058116c12af76ef69cdf942.cu
#define TORCH_ASSERT_NO_OPERATORS #define _USE_MATH_DEFINES #include <ATen/native/Activation.h> #include <cmath> #include <thrust/tuple.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/core/TensorBase.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/cuda/ApplyGridUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/Loops.cuh> namespace at { namespace native { namespace { void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; auto negval = negval_.to<opmath_t>(); gpu_kernel(iter, [negval] GPU_LAMBDA(scalar_t a) -> scalar_t { opmath_t aop = static_cast<opmath_t>(a); return aop > opmath_t(0) ? aop : aop * negval; }); }); } void leaky_relu_backward_kernel( TensorIteratorBase& iter, const Scalar& negval_) { AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { using opmath_t = at::opmath_type<scalar_t>; auto negval = negval_.to<opmath_t>(); gpu_kernel( iter, [negval] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { opmath_t aop = static_cast<opmath_t>(a); opmath_t bop = static_cast<opmath_t>(b); return aop > opmath_t(0) ? bop : bop * negval; }); }); } } // namespace REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); } // namespace native } // namespace at
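The two dispatch stubs above reduce to simple elementwise math once ATen's gpu_kernel machinery is stripped away: the forward pass returns x for x > 0 and negval * x otherwise, and the backward pass scales the incoming gradient by 1 or negval according to the sign of the first operand. The sketch below is a standalone CUDA rendering of that math in plain float, not a reimplementation of the ATen kernels themselves.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void leaky_relu_fwd(const float* x, float* y, float negval, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = x[i] > 0.f ? x[i] : x[i] * negval;
}

__global__ void leaky_relu_bwd(const float* x, const float* grad_out, float* grad_in, float negval, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) grad_in[i] = x[i] > 0.f ? grad_out[i] : grad_out[i] * negval;
}

int main() {
    const int n = 4;
    float hx[n] = {-2.f, -0.5f, 0.5f, 2.f};
    float hy[n];
    float *dx, *dy;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&dy, n * sizeof(float));
    cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
    leaky_relu_fwd<<<1, 32>>>(dx, dy, 0.01f, n);          // negval = 0.01, PyTorch's default slope
    cudaMemcpy(hy, dy, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%g -> %g\n", hx[i], hy[i]);
    cudaFree(dx);
    cudaFree(dy);
    return 0;
}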
212bd7523c03cc9b7e475ec46908ce3a61ac5cf9.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <random> #include <tuple> #include <vector> #include <iostream> #include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 3; std::vector<float> activations = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7] * probs[13]; std::cout << probs[1] << std::endl; std::cout << probs[7] << std::endl; std::cout << expected_score << std::endl; } hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *activations_gpu; throw_on_error(hipMalloc(&activations_gpu, activations.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> labels = {1, 2, 3}; std::vector<int> label_lengths = {3}; std::vector<int> lengths; lengths.push_back(T); float score; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); std::cout << "allocate workspace in bytes:" << gpu_alloc_bytes << std::endl; char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, NULL, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, info), "Error: compute_ctc_loss in small_test"); std::cout << score << std::endl; score = ::exp(-score); std::cout << score << std::endl; const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(hipFree(activations_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return (score > lb && score < ub); } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *acts_gpu; throw_on_error(hipMalloc(&acts_gpu, acts.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, (alphabet_size * T) * sizeof(float)), "hipMalloc"); float cost; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), 
label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, info), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(hipFree(acts_gpu), "hipFree"); throw_on_error(hipFree(grads_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *acts_gpu; throw_on_error(hipMalloc(&acts_gpu, acts.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, acts.size() * sizeof(float)), "hipMalloc"); ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> costsP2(minibatch); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (1) in grad_check"); acts[i] -= 2 * epsilon; throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] = (costP1 - 
costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(hipFree(acts_gpu), "hipFree"); throw_on_error(hipFree(grads_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return diff; } bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { std::cout << "Running GPU tests" << std::endl; throw_on_error(hipSetDevice(0), "hipSetDevice"); bool status = true; status &= small_test(); status &= inf_test(); status &= run_tests(); if (status) std::cout << "Tests pass" << std::endl; else std::cout << "Some or all tests fail" << std::endl; }
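small_test() above relies on the fact that with T = 3 frames and the three labels {1, 2, 3}, exactly one CTC alignment is possible (one label per frame, no blanks), so the expected score is simply the product of three softmax probabilities, the probs[1] * probs[7] * probs[13] in the code. A host-only recomputation of that number, using the same hard-coded activations:

#include <cmath>
#include <cstdio>

int main() {
    const int T = 3, A = 5;                       // 3 frames, alphabet of 5 (blank + 4 symbols)
    const float acts[T * A] = {0.1f, 0.6f, 0.1f, 0.1f, 0.1f,
                               0.1f, 0.1f, 0.6f, 0.1f, 0.1f,
                               0.1f, 0.1f, 0.1f, 0.6f, 0.1f};
    float probs[T * A];
    for (int t = 0; t < T; ++t) {                 // row-wise softmax over each frame
        float denom = 0.f;
        for (int a = 0; a < A; ++a) denom += std::exp(acts[t * A + a]);
        for (int a = 0; a < A; ++a) probs[t * A + a] = std::exp(acts[t * A + a]) / denom;
    }
    const float expected = probs[1] * probs[7] * probs[13];  // labels 1, 2, 3 at t = 0, 1, 2
    std::printf("expected_score = %g\n", expected);
    return 0;
}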
212bd7523c03cc9b7e475ec46908ce3a61ac5cf9.cu
#include <cmath> #include <random> #include <tuple> #include <vector> #include <iostream> #include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 3; std::vector<float> activations = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7] * probs[13]; std::cout << probs[1] << std::endl; std::cout << probs[7] << std::endl; std::cout << expected_score << std::endl; } cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *activations_gpu; throw_on_error(cudaMalloc(&activations_gpu, activations.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> labels = {1, 2, 3}; std::vector<int> label_lengths = {3}; std::vector<int> lengths; lengths.push_back(T); float score; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); std::cout << "allocate workspace in bytes:" << gpu_alloc_bytes << std::endl; char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, NULL, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, info), "Error: compute_ctc_loss in small_test"); std::cout << score << std::endl; score = std::exp(-score); std::cout << score << std::endl; const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(cudaFree(activations_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return (score > lb && score < ub); } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, (alphabet_size * T) * sizeof(float)), "cudaMalloc"); float cost; ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), label_lengths.data(), 
lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, info), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, acts.size() * sizeof(float)), "cudaMalloc"); ctcComputeInfo info; info.loc = CTC_GPU; info.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), info, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> costsP2(minibatch); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (1) in grad_check"); acts[i] -= 2 * epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, info), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] 
= (costP1 - costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return diff; } bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { std::cout << "Running GPU tests" << std::endl; throw_on_error(cudaSetDevice(0), "cudaSetDevice"); bool status = true; status &= small_test(); status &= inf_test(); status &= run_tests(); if (status) std::cout << "Tests pass" << std::endl; else std::cout << "Some or all tests fail" << std::endl; }
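grad_check above validates the analytic gradients returned by compute_ctc_loss against a second-order central difference of the summed minibatch cost. For each activation a_i it forms

\[
\frac{\partial \mathcal{L}}{\partial a_i} \;\approx\; \frac{\mathcal{L}(a_i+\epsilon) - \mathcal{L}(a_i-\epsilon)}{2\epsilon},
\qquad \epsilon = 10^{-2}, \quad \mathcal{L} = \sum_{b} \mathrm{cost}_b ,
\]

where the sum runs over the minibatch; rel_diff (defined elsewhere in the test file) then collapses the element-wise discrepancy between the analytic and numerical gradients into the single scalar that run_tests compares against tol.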
18d342c9bd1ed066ed786576c53a0a2ef1d20432.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; if(row < numCRows && col < numCColumns) { float cSum = 0.0f; for(int i=0; i<numAColumns; i++) { cSum = cSum + (A[row*numAColumns + i] * B[i*numBColumns + col]); } C[row*numCColumns + col] = cSum; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float*)malloc(sizeof(float)*numCRows*numCColumns); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipMalloc((void**)&deviceA, sizeof(float)*numARows*numAColumns); hipMalloc((void**)&deviceB, sizeof(float)*numBRows*numBColumns); hipMalloc((void**)&deviceC, sizeof(float)*numCRows*numCColumns); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 numBlocks(((numCColumns-1)/32)+1, ((numCRows-1)/32)+1, 1); dim3 numThreads(32,32,1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiply), dim3(numBlocks), dim3(numThreads) , 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing 
GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
18d342c9bd1ed066ed786576c53a0a2ef1d20432.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; if(row < numCRows && col < numCColumns) { float cSum = 0.0f; for(int i=0; i<numAColumns; i++) { cSum = cSum + (A[row*numAColumns + i] * B[i*numBColumns + col]); } C[row*numCColumns + col] = cSum; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float*)malloc(sizeof(float)*numCRows*numCColumns); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc((void**)&deviceA, sizeof(float)*numARows*numAColumns); cudaMalloc((void**)&deviceB, sizeof(float)*numBRows*numBColumns); cudaMalloc((void**)&deviceC, sizeof(float)*numCRows*numCColumns); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 numBlocks(((numCColumns-1)/32)+1, ((numCRows-1)/32)+1, 1); dim3 numThreads(32,32,1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply<<< numBlocks, numThreads >>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; 
}
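The launch configuration in the matrix-multiply pair above rounds the grid up with integer ceiling division, so matrix dimensions that are not multiples of the 32x32 block are still covered, and the row/col guard inside matrixMultiply masks off the overhanging threads. Worked arithmetic with hypothetical sizes:

// numCColumns = 100, numCRows = 70 (hypothetical)
// numBlocks.x = ((100 - 1) / 32) + 1 = 4  ->  4 * 32 = 128 threads across, 28 masked by the col check
// numBlocks.y = (( 70 - 1) / 32) + 1 = 3  ->  3 * 32 =  96 threads down,   26 masked by the row check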
e608ddffd4ba38941354fa004e8440fdae04e659.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/detail/utilities/cuda.cuh> namespace cudf { namespace detail { namespace { struct get_element_functor { template <typename T, std::enable_if_t<is_fixed_width<T>() && !is_fixed_point<T>()> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { auto s = make_fixed_width_scalar(data_type(type_to_id<T>()), stream, mr); using ScalarType = cudf::scalar_type_t<T>; auto typed_s = static_cast<ScalarType *>(s.get()); auto device_s = get_scalar_device_view(*typed_s); auto device_col = column_device_view::create(input, stream); device_single_thread( [device_s, d_col = *device_col, index] __device__() mutable { device_s.set_value(d_col.element<T>(index)); device_s.set_valid(d_col.is_valid(index)); }, stream); return s; } template <typename T, std::enable_if_t<std::is_same<T, string_view>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { auto device_col = column_device_view::create(input, stream); rmm::device_scalar<string_view> temp_data; rmm::device_scalar<bool> temp_valid; device_single_thread( [buffer = temp_data.data(), validity = temp_valid.data(), d_col = *device_col, index] __device__() mutable { *buffer = d_col.element<string_view>(index); *validity = d_col.is_valid(index); }, stream); return std::make_unique<string_scalar>(temp_data, temp_valid.value(stream), stream, mr); } template <typename T, std::enable_if_t<std::is_same<T, dictionary32>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { auto dict_view = dictionary_column_view(input); auto key_index_scalar = get_element_functor{}.operator()<int32_t>(dict_view.indices(), index, stream); size_type key_index = static_cast<numeric_scalar<int32_t> const *>(key_index_scalar.get())->value(stream); auto result = type_dispatcher( dict_view.keys().type(), get_element_functor{}, dict_view.keys(), key_index, stream, mr); auto result_validity = result->validity_data(); auto device_col = column_device_view::create(input, stream); device_single_thread( [result_validity, d_col = *device_col, index] __device__() mutable { *result_validity = d_col.is_valid(index); }, stream); return result; } template <typename T, std::enable_if_t<std::is_same<T, list_view>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t 
stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for list_view"); } template <typename T, std::enable_if_t<std::is_same<T, numeric::decimal32>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for decimal32"); } template <typename T, std::enable_if_t<std::is_same<T, numeric::decimal64>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for decimal64"); } template <typename T, std::enable_if_t<std::is_same<T, struct_view>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, hipStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for struct_view"); } }; } // namespace std::unique_ptr<scalar> get_element(column_view const &input, size_type index, hipStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(index >= 0 and index < input.size(), "Index out of bounds"); return type_dispatcher(input.type(), get_element_functor{}, input, index, stream, mr); } } // namespace detail std::unique_ptr<scalar> get_element(column_view const &input, size_type index, rmm::mr::device_memory_resource *mr) { return detail::get_element(input, index, 0, mr); } } // namespace cudf
e608ddffd4ba38941354fa004e8440fdae04e659.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/detail/utilities/cuda.cuh> namespace cudf { namespace detail { namespace { struct get_element_functor { template <typename T, std::enable_if_t<is_fixed_width<T>() && !is_fixed_point<T>()> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { auto s = make_fixed_width_scalar(data_type(type_to_id<T>()), stream, mr); using ScalarType = cudf::scalar_type_t<T>; auto typed_s = static_cast<ScalarType *>(s.get()); auto device_s = get_scalar_device_view(*typed_s); auto device_col = column_device_view::create(input, stream); device_single_thread( [device_s, d_col = *device_col, index] __device__() mutable { device_s.set_value(d_col.element<T>(index)); device_s.set_valid(d_col.is_valid(index)); }, stream); return s; } template <typename T, std::enable_if_t<std::is_same<T, string_view>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { auto device_col = column_device_view::create(input, stream); rmm::device_scalar<string_view> temp_data; rmm::device_scalar<bool> temp_valid; device_single_thread( [buffer = temp_data.data(), validity = temp_valid.data(), d_col = *device_col, index] __device__() mutable { *buffer = d_col.element<string_view>(index); *validity = d_col.is_valid(index); }, stream); return std::make_unique<string_scalar>(temp_data, temp_valid.value(stream), stream, mr); } template <typename T, std::enable_if_t<std::is_same<T, dictionary32>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { auto dict_view = dictionary_column_view(input); auto key_index_scalar = get_element_functor{}.operator()<int32_t>(dict_view.indices(), index, stream); size_type key_index = static_cast<numeric_scalar<int32_t> const *>(key_index_scalar.get())->value(stream); auto result = type_dispatcher( dict_view.keys().type(), get_element_functor{}, dict_view.keys(), key_index, stream, mr); auto result_validity = result->validity_data(); auto device_col = column_device_view::create(input, stream); device_single_thread( [result_validity, d_col = *device_col, index] __device__() mutable { *result_validity = d_col.is_valid(index); }, stream); return result; } template <typename T, std::enable_if_t<std::is_same<T, list_view>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = 
rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for list_view"); } template <typename T, std::enable_if_t<std::is_same<T, numeric::decimal32>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for decimal32"); } template <typename T, std::enable_if_t<std::is_same<T, numeric::decimal64>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for decimal64"); } template <typename T, std::enable_if_t<std::is_same<T, struct_view>::value> *p = nullptr> std::unique_ptr<scalar> operator()( column_view const &input, size_type index, cudaStream_t stream = 0, rmm::mr::device_memory_resource *mr = rmm::mr::get_default_resource()) { CUDF_FAIL("get_element_functor not supported for struct_view"); } }; } // namespace std::unique_ptr<scalar> get_element(column_view const &input, size_type index, cudaStream_t stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(index >= 0 and index < input.size(), "Index out of bounds"); return type_dispatcher(input.type(), get_element_functor{}, input, index, stream, mr); } } // namespace detail std::unique_ptr<scalar> get_element(column_view const &input, size_type index, rmm::mr::device_memory_resource *mr) { return detail::get_element(input, index, 0, mr); } } // namespace cudf
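For context, the entry point this pair implements is cudf::get_element, declared in cudf/copying.hpp, which copies one device-side element back as a typed scalar. A minimal usage sketch, assuming a column_view of INT32 values; the helper name example_read and the null fallback are illustrative only, not part of the library:

#include <cudf/copying.hpp>
#include <cudf/scalar/scalar.hpp>

int32_t example_read(cudf::column_view const& col, cudf::size_type row)
{
  // get_element returns a unique_ptr<scalar> carrying both the value and its validity.
  auto s     = cudf::get_element(col, row, rmm::mr::get_default_resource());
  auto typed = static_cast<cudf::numeric_scalar<int32_t>*>(s.get());
  return typed->is_valid() ? typed->value() : 0;  // 0 stands in for a null row in this sketch
}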
010d4489cf3cca7e05493042a5839439025ce6a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************** * DESCRIPTION: * Serial Concurrent Wave Equation - C Version * This program implements the concurrent wave equation **********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define MAXPOINTS 1000000 #define MAXSTEPS 1000000 #define MINPOINTS 20 #define PI 3.14159265 void check_param(void); void init_line(void); void update(void); void printfinal(void); int nsteps, /* number of time steps */ tpoints, /* total points along string */ rcode; /* generic return code */ float *values, /* values at time t */ *oldval, /* values at time t-dt */ *newval; /* values at time t+dt */ /********************************************************************* * Checks input value from parameter *********************************************************************/ void check_param(void) { char tchar[20]; /* check number of points, number of iterations */ while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) { printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS); scanf("%s", tchar); tpoints = atoi(tchar); if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS); } while((nsteps < 1) || (nsteps > MAXSTEPS)) { printf("Enter number of time steps [1-%d]: ", MAXSTEPS); scanf("%s", tchar); nsteps = atoi(tchar); if ((nsteps < 1) || (nsteps > MAXSTEPS)) printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS); } printf("Using points = %d, steps = %d\n", tpoints, nsteps); } /********************************************************************** * Initialize points on line **********************************************************************/ void init_line(void) { int i, j; float x, fac, k, tmp; /*allocate array memory to host*/ size_t size = (tpoints+2)*sizeof(float); values = (float*)malloc(size); newval = (float*)malloc(size); oldval = (float*)malloc(size); /* Calculate initial values based on sine curve. 
*/ fac = 2.0 * PI; k = 0.0; tmp = tpoints - 1; for (j = 0; j < tpoints; j++) { x = k/tmp; values[j] = sin(fac * x); k = k + 1.0; } /* Initialize old values array */ for (i = 0; i < tpoints; i++) oldval[i] = values[i]; } /********************************************************************** * Calculate new values using wave equation **********************************************************************/ void do_math(int i) { float dtime, c, dx, tau, sqtau; dtime = 0.3; c = 1.0; dx = 1.0; tau = (c * dtime / dx); sqtau = tau * tau; newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]); } /********************************************************************** * Update all values along line a specified number of times **********************************************************************/ __global__ void parallel_update(float* d_values, float* d_oldval, float* d_newval, int tpoints, int nsteps, int threadsPerBlock){ int i; int pointID = blockIdx.x*threadsPerBlock+threadIdx.x; float dtime, c, dx, tau, sqtau; dtime = 0.3; c = 1.0; dx = 1.0; tau = (c*dtime / dx); sqtau = tau * tau; for(i=0; i<nsteps;i++){ if((pointID == 1) || (pointID == tpoints)) d_newval[pointID] = 0.0; else d_newval[pointID] = (2.0*d_values[pointID]) - d_oldval[pointID] + (sqtau*(-2.0)*d_values[pointID]); d_oldval[pointID] = d_values[pointID]; d_values[pointID] = d_newval[pointID]; } } void update() { int i, j; /* Update values for each time step */ for (i = 0; i < nsteps; i++) { for (j = 0; j < tpoints; j++) { /* global endpoints */ if ((j == 1) || (j == tpoints)) newval[j] = 0.0; else do_math(j); } /* Update old values with new values */ for (j = 0; j < tpoints; j++) { oldval[j] = values[j]; values[j] = newval[j]; } } } /********************************************************************** * Print final results * *********************************************************************/ void printfinal () { int i; for (i = 1; i <= tpoints; i++) { printf("%6.4f ", values[i]); if (i%10 == 0) printf("\n"); } } int main(int argc, const char *argv[]) { sscanf(argv[1], "%d", &tpoints); sscanf(argv[2], "%d", &nsteps); check_param(); printf("Initializing points on the line...\n"); init_line(); printf("Updating all points for all time steps...\n"); //update(); //parallel this function //allocate vector in device's memory size_t size=(tpoints+2)*sizeof(float); float* d_values; hipMalloc(&d_values, size); float* d_oldval; hipMalloc(&d_oldval, size); float* d_newval; hipMalloc(&d_newval, size); //copy memory from host to device hipMemcpy( d_values, values, size, hipMemcpyHostToDevice); hipMemcpy( d_newval, newval, size, hipMemcpyHostToDevice); hipMemcpy( d_oldval, oldval, size, hipMemcpyHostToDevice); //launch the kernel int N = tpoints+2; int threadsPerBlock = 256; int blocksPerGrid = (N + threadsPerBlock-1) / threadsPerBlock; //N is the number of total threads hipLaunchKernelGGL(( parallel_update), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_values, d_oldval, d_newval, tpoints, nsteps, threadsPerBlock); //copy the result from device to host hipMemcpy(values, d_values, size, hipMemcpyDeviceToHost); printf("Printing final results...\n"); printfinal(); printf("\nDone.\n\n"); //free device's memory hipFree(d_values); hipFree(d_oldval); hipFree(d_newval); //free host memory free(values); free(newval); free(oldval); return 0; }
010d4489cf3cca7e05493042a5839439025ce6a0.cu
/********************************************************************** * DESCRIPTION: * Serial Concurrent Wave Equation - C Version * This program implements the concurrent wave equation **********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define MAXPOINTS 1000000 #define MAXSTEPS 1000000 #define MINPOINTS 20 #define PI 3.14159265 void check_param(void); void init_line(void); void update(void); void printfinal(void); int nsteps, /* number of time steps */ tpoints, /* total points along string */ rcode; /* generic return code */ float *values, /* values at time t */ *oldval, /* values at time t-dt */ *newval; /* values at time t+dt */ /********************************************************************* * Checks input value from parameter *********************************************************************/ void check_param(void) { char tchar[20]; /* check number of points, number of iterations */ while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) { printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS); scanf("%s", tchar); tpoints = atoi(tchar); if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS); } while((nsteps < 1) || (nsteps > MAXSTEPS)) { printf("Enter number of time steps [1-%d]: ", MAXSTEPS); scanf("%s", tchar); nsteps = atoi(tchar); if ((nsteps < 1) || (nsteps > MAXSTEPS)) printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS); } printf("Using points = %d, steps = %d\n", tpoints, nsteps); } /********************************************************************** * Initialize points on line **********************************************************************/ void init_line(void) { int i, j; float x, fac, k, tmp; /*allocate array memory to host*/ size_t size = (tpoints+2)*sizeof(float); values = (float*)malloc(size); newval = (float*)malloc(size); oldval = (float*)malloc(size); /* Calculate initial values based on sine curve. 
*/ fac = 2.0 * PI; k = 0.0; tmp = tpoints - 1; for (j = 0; j < tpoints; j++) { x = k/tmp; values[j] = sin(fac * x); k = k + 1.0; } /* Initialize old values array */ for (i = 0; i < tpoints; i++) oldval[i] = values[i]; } /********************************************************************** * Calculate new values using wave equation **********************************************************************/ void do_math(int i) { float dtime, c, dx, tau, sqtau; dtime = 0.3; c = 1.0; dx = 1.0; tau = (c * dtime / dx); sqtau = tau * tau; newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]); } /********************************************************************** * Update all values along line a specified number of times **********************************************************************/ __global__ void parallel_update(float* d_values, float* d_oldval, float* d_newval, int tpoints, int nsteps, int threadsPerBlock){ int i; int pointID = blockIdx.x*threadsPerBlock+threadIdx.x; float dtime, c, dx, tau, sqtau; dtime = 0.3; c = 1.0; dx = 1.0; tau = (c*dtime / dx); sqtau = tau * tau; for(i=0; i<nsteps;i++){ if((pointID == 1) || (pointID == tpoints)) d_newval[pointID] = 0.0; else d_newval[pointID] = (2.0*d_values[pointID]) - d_oldval[pointID] + (sqtau*(-2.0)*d_values[pointID]); d_oldval[pointID] = d_values[pointID]; d_values[pointID] = d_newval[pointID]; } } void update() { int i, j; /* Update values for each time step */ for (i = 0; i < nsteps; i++) { for (j = 0; j < tpoints; j++) { /* global endpoints */ if ((j == 1) || (j == tpoints)) newval[j] = 0.0; else do_math(j); } /* Update old values with new values */ for (j = 0; j < tpoints; j++) { oldval[j] = values[j]; values[j] = newval[j]; } } } /********************************************************************** * Print final results * *********************************************************************/ void printfinal () { int i; for (i = 1; i <= tpoints; i++) { printf("%6.4f ", values[i]); if (i%10 == 0) printf("\n"); } } int main(int argc, const char *argv[]) { sscanf(argv[1], "%d", &tpoints); sscanf(argv[2], "%d", &nsteps); check_param(); printf("Initializing points on the line...\n"); init_line(); printf("Updating all points for all time steps...\n"); //update(); //parallel this function //allocate vector in device's memory size_t size=(tpoints+2)*sizeof(float); float* d_values; cudaMalloc(&d_values, size); float* d_oldval; cudaMalloc(&d_oldval, size); float* d_newval; cudaMalloc(&d_newval, size); //copy memory from host to device cudaMemcpy( d_values, values, size, cudaMemcpyHostToDevice); cudaMemcpy( d_newval, newval, size, cudaMemcpyHostToDevice); cudaMemcpy( d_oldval, oldval, size, cudaMemcpyHostToDevice); //launch the kernel int N = tpoints+2; int threadsPerBlock = 256; int blocksPerGrid = (N + threadsPerBlock-1) / threadsPerBlock; //N is the number of total threads parallel_update<<<blocksPerGrid, threadsPerBlock>>>( d_values, d_oldval, d_newval, tpoints, nsteps, threadsPerBlock); //copy the result from device to host cudaMemcpy(values, d_values, size, cudaMemcpyDeviceToHost); printf("Printing final results...\n"); printfinal(); printf("\nDone.\n\n"); //free device's memory cudaFree(d_values); cudaFree(d_oldval); cudaFree(d_newval); //free host memory free(values); free(newval); free(oldval); return 0; }
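One thing the wave-equation pair above does not guard against: the grid is rounded up to a multiple of threadsPerBlock (256), but the device arrays hold only tpoints + 2 floats, so overhanging threads with pointID >= tpoints + 2 write out of bounds whenever tpoints + 2 is not a multiple of 256. A sketch of a guarded variant (hypothetical name parallel_update_guarded; the body otherwise mirrors the original kernel):

__global__ void parallel_update_guarded(float* d_values, float* d_oldval, float* d_newval,
                                        int tpoints, int nsteps, int threadsPerBlock)
{
    int pointID = blockIdx.x * threadsPerBlock + threadIdx.x;
    if (pointID >= tpoints + 2) return;              // grid overhang: nothing to update

    float dtime = 0.3f, c = 1.0f, dx = 1.0f;
    float tau   = c * dtime / dx;
    float sqtau = tau * tau;

    for (int i = 0; i < nsteps; i++) {
        if ((pointID == 1) || (pointID == tpoints))
            d_newval[pointID] = 0.0f;                // fixed endpoints of the string
        else
            d_newval[pointID] = (2.0f * d_values[pointID]) - d_oldval[pointID]
                                + (sqtau * (-2.0f) * d_values[pointID]);
        d_oldval[pointID] = d_values[pointID];
        d_values[pointID] = d_newval[pointID];
    }
}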
a653773b2f89aeeae6dbcac7144a0267e2d2930f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; // // DONE: allocate device memory buffers on the GPU using // hipMalloc. The started code issues warnings on build because // these buffers are used in the call to saxpy_kernel below // without being initialized. // int arraySize = N * sizeof(float); hipMalloc(&device_x, arraySize); hipMalloc(&device_y, arraySize); hipMalloc(&device_result, arraySize); // start timing after allocation of device memory. double startTime = CycleTimer::currentSeconds(); // // DONE: copy input arrays to the GPU using hipMemcpy // hipMemcpy(device_x, xarray, arraySize, hipMemcpyHostToDevice); hipMemcpy(device_y, yarray, arraySize, hipMemcpyHostToDevice); // // DONE: insert time here to begin timing only the kernel // double kernelStartTime = CycleTimer::currentSeconds(); // run saxpy_kernel on the GPU hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result); // // DONE: insert timer here to time only the kernel. Since the // kernel will run asynchronously with the calling CPU thread, you // need to call hipDeviceSynchronize() before your timer to // ensure the kernel running on the GPU has completed. (Otherwise // you will incorrectly observe that almost no time elapses!) // hipDeviceSynchronize(); double kernelEndTime = CycleTimer::currentSeconds(); // // DONE: copy result from GPU using hipMemcpy // hipMemcpy(resultarray, device_result, arraySize, hipMemcpyDeviceToHost); // end timing after result has been copied back into host memory. 
// The time elapsed between startTime and endTime is the total // time to copy data to the GPU, run the kernel, and copy the // result back to the CPU double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); double kernelExDuration = kernelEndTime - kernelStartTime; printf("Kernel time: %.3f ms\n", 1000.f * kernelExDuration); // // DONE free memory buffers on the GPU // hipFree(device_x); hipFree(device_y); hipFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
a653773b2f89aeeae6dbcac7144a0267e2d2930f.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; // // DONE: allocate device memory buffers on the GPU using // cudaMalloc. The started code issues warnings on build because // these buffers are used in the call to saxpy_kernel below // without being initialized. // int arraySize = N * sizeof(float); cudaMalloc(&device_x, arraySize); cudaMalloc(&device_y, arraySize); cudaMalloc(&device_result, arraySize); // start timing after allocation of device memory. double startTime = CycleTimer::currentSeconds(); // // DONE: copy input arrays to the GPU using cudaMemcpy // cudaMemcpy(device_x, xarray, arraySize, cudaMemcpyHostToDevice); cudaMemcpy(device_y, yarray, arraySize, cudaMemcpyHostToDevice); // // DONE: insert time here to begin timing only the kernel // double kernelStartTime = CycleTimer::currentSeconds(); // run saxpy_kernel on the GPU saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result); // // DONE: insert timer here to time only the kernel. Since the // kernel will run asynchronously with the calling CPU thread, you // need to call cudaThreadSynchronize() before your timer to // ensure the kernel running on the GPU has completed. (Otherwise // you will incorrectly observe that almost no time elapses!) // cudaThreadSynchronize(); double kernelEndTime = CycleTimer::currentSeconds(); // // DONE: copy result from GPU using cudaMemcpy // cudaMemcpy(resultarray, device_result, arraySize, cudaMemcpyDeviceToHost); // end timing after result has been copied back into host memory. 
// The time elapsed between startTime and endTime is the total // time to copy data to the GPU, run the kernel, and copy the // result back to the CPU double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); double kernelExDuration = kernelEndTime - kernelStartTime; printf("Kernel time: %.3f ms\n", 1000.f * kernelExDuration); // // DONE free memory buffers on the GPU // cudaFree(device_x); cudaFree(device_y); cudaFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
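As a sanity check on what the saxpy pair prints: totalBytes counts the three N-element float arrays that cross the bus (x and y copied up, result copied back), and toBW (defined elsewhere in the assignment, presumably bytes divided by 10^9 times seconds) turns that into GB/s. Illustrative arithmetic with hypothetical values:

// N = 20,000,000 floats per array (hypothetical)
// totalBytes      = 3 * N * sizeof(float) = 3 * 20e6 * 4 B = 240,000,000 B ~= 0.24 GB
// overallDuration = 0.080 s (hypothetical)   ->  0.24 GB / 0.080 s = 3.0 GB/s
// the kernel time printed separately is normally a small fraction of this; the copies dominate.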
83dafb72856f70d527259ad142fae808ae8efa82.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> // helper functions for CUDA error check #include "md5cuda.h" uint32_t *dev_k = 0; uint32_t *dev_r = 0; __device__ void cuda_to_bytes(uint32_t val, uint8_t *bytes) { bytes[0] = (uint8_t) val; bytes[1] = (uint8_t) (val >> 8); bytes[2] = (uint8_t) (val >> 16); bytes[3] = (uint8_t) (val >> 24); } __device__ uint32_t cuda_to_int32(const uint8_t *bytes) { return (uint32_t) bytes[0] | ((uint32_t) bytes[1] << 8) | ((uint32_t) bytes[2] << 16) | ((uint32_t) bytes[3] << 24); } __global__ void md5kernel(const uint8_t *initial_msg, size_t initial_len, uint8_t *digest, const uint32_t *k, const uint32_t *r) { // These vars will contain the hash uint32_t h0, h1, h2, h3; // Message (to prepare) uint8_t *msg = NULL; size_t new_len, offset; uint32_t w[16]; uint32_t a, b, c, d, i, f, g, temp; // Initialize variables - simple count in nibbles: h0 = 0x67452301; h1 = 0xefcdab89; h2 = 0x98badcfe; h3 = 0x10325476; //Pre-processing: //append "1" bit to message //append "0" bits until message length in bits 448 (mod 512) //append length mod (2^64) to message for (new_len = initial_len + 1; new_len % (512/8) != 448/8; new_len++) ; msg = (uint8_t *)malloc(new_len + 8); memcpy(msg, initial_msg, initial_len); msg[initial_len] = 0x80; // append the "1" bit; most significant bit is "first" for (offset = initial_len + 1; offset < new_len; offset++) msg[offset] = 0; // append "0" bits // append the len in bits at the end of the buffer. cuda_to_bytes(initial_len*8, msg + new_len); // initial_len>>29 == initial_len*8>>32, but avoids overflow. cuda_to_bytes(initial_len>>29, msg + new_len + 4); // Process the message in successive 512-bit chunks: //for each 512-bit chunk of message: for(offset=0; offset<new_len; offset += (512/8)) { // break chunk into sixteen 32-bit words w[j], 0 j 15 for (i = 0; i < 16; i++) w[i] = cuda_to_int32(msg + offset + i*4); // Initialize hash value for this chunk: a = h0; b = h1; c = h2; d = h3; // Main loop: for(i = 0; i<64; i++) { if (i < 16) { f = (b & c) | ((~b) & d); g = i; } else if (i < 32) { f = (d & b) | ((~d) & c); g = (5*i + 1) % 16; } else if (i < 48) { f = b ^ c ^ d; g = (3*i + 5) % 16; } else { f = c ^ (b | (~d)); g = (7*i) % 16; } temp = d; d = c; c = b; b = b + LEFTROTATE((a + f + k[i] + w[g]), r[i]); a = temp; } // Add this chunk's hash to result so far: h0 += a; h1 += b; h2 += c; h3 += d; } // cleanup free(msg); //var char digest[16] := h0 append h1 append h2 append h3 //(Output is in little-endian) cuda_to_bytes(h0, digest); cuda_to_bytes(h1, digest + 4); cuda_to_bytes(h2, digest + 8); cuda_to_bytes(h3, digest + 12); } extern "C" { void md5WithCuda(const uint8_t *initial_msg, size_t initial_len, uint8_t *digest) { uint8_t *dev_initial_msg = 0; uint8_t *dev_digest = 0; // Allocate GPU buffers for three vectors (two input, one output). checkCudaErrors(hipMalloc((void**)&dev_k, k_size * sizeof(uint32_t))); checkCudaErrors(hipMalloc((void**)&dev_r, r_size * sizeof(uint32_t))); checkCudaErrors(hipMalloc((void**)&dev_digest, md5_size * sizeof(uint8_t))); checkCudaErrors(hipMalloc((void**)&dev_initial_msg, initial_len * sizeof(uint8_t))); // Copy input vectors from host memory to GPU buffers. 
checkCudaErrors(hipMemcpy(dev_initial_msg, initial_msg, initial_len * sizeof(uint8_t), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_k, cuda_k, k_size * sizeof(uint32_t), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_r, cuda_r, r_size * sizeof(uint32_t), hipMemcpyHostToDevice)); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( md5kernel), dim3(1), dim3(initial_len), 0, 0, dev_initial_msg, initial_len, dev_digest, dev_k, dev_r); // Check for any errors launching the kernel hipError_t cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "md5Kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. checkCudaErrors(hipDeviceSynchronize()); // Copy output vector from GPU buffer to host memory. checkCudaErrors(hipMemcpy(digest, dev_digest, md5_size * sizeof(uint8_t), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(dev_digest)); checkCudaErrors(hipFree(dev_initial_msg)); checkCudaErrors(hipFree(dev_k)); checkCudaErrors(hipFree(dev_r)); } }
83dafb72856f70d527259ad142fae808ae8efa82.cu
#include <stdio.h> #include <cuda.h> #include <helper_cuda.h> // helper functions for CUDA error check #include "md5cuda.h" uint32_t *dev_k = 0; uint32_t *dev_r = 0; __device__ void cuda_to_bytes(uint32_t val, uint8_t *bytes) { bytes[0] = (uint8_t) val; bytes[1] = (uint8_t) (val >> 8); bytes[2] = (uint8_t) (val >> 16); bytes[3] = (uint8_t) (val >> 24); } __device__ uint32_t cuda_to_int32(const uint8_t *bytes) { return (uint32_t) bytes[0] | ((uint32_t) bytes[1] << 8) | ((uint32_t) bytes[2] << 16) | ((uint32_t) bytes[3] << 24); } __global__ void md5kernel(const uint8_t *initial_msg, size_t initial_len, uint8_t *digest, const uint32_t *k, const uint32_t *r) { // These vars will contain the hash uint32_t h0, h1, h2, h3; // Message (to prepare) uint8_t *msg = NULL; size_t new_len, offset; uint32_t w[16]; uint32_t a, b, c, d, i, f, g, temp; // Initialize variables - simple count in nibbles: h0 = 0x67452301; h1 = 0xefcdab89; h2 = 0x98badcfe; h3 = 0x10325476; //Pre-processing: //append "1" bit to message //append "0" bits until message length in bits ≡ 448 (mod 512) //append length mod (2^64) to message for (new_len = initial_len + 1; new_len % (512/8) != 448/8; new_len++) ; msg = (uint8_t *)malloc(new_len + 8); memcpy(msg, initial_msg, initial_len); msg[initial_len] = 0x80; // append the "1" bit; most significant bit is "first" for (offset = initial_len + 1; offset < new_len; offset++) msg[offset] = 0; // append "0" bits // append the len in bits at the end of the buffer. cuda_to_bytes(initial_len*8, msg + new_len); // initial_len>>29 == initial_len*8>>32, but avoids overflow. cuda_to_bytes(initial_len>>29, msg + new_len + 4); // Process the message in successive 512-bit chunks: //for each 512-bit chunk of message: for(offset=0; offset<new_len; offset += (512/8)) { // break chunk into sixteen 32-bit words w[j], 0 ≤ j ≤ 15 for (i = 0; i < 16; i++) w[i] = cuda_to_int32(msg + offset + i*4); // Initialize hash value for this chunk: a = h0; b = h1; c = h2; d = h3; // Main loop: for(i = 0; i<64; i++) { if (i < 16) { f = (b & c) | ((~b) & d); g = i; } else if (i < 32) { f = (d & b) | ((~d) & c); g = (5*i + 1) % 16; } else if (i < 48) { f = b ^ c ^ d; g = (3*i + 5) % 16; } else { f = c ^ (b | (~d)); g = (7*i) % 16; } temp = d; d = c; c = b; b = b + LEFTROTATE((a + f + k[i] + w[g]), r[i]); a = temp; } // Add this chunk's hash to result so far: h0 += a; h1 += b; h2 += c; h3 += d; } // cleanup free(msg); //var char digest[16] := h0 append h1 append h2 append h3 //(Output is in little-endian) cuda_to_bytes(h0, digest); cuda_to_bytes(h1, digest + 4); cuda_to_bytes(h2, digest + 8); cuda_to_bytes(h3, digest + 12); } extern "C" { void md5WithCuda(const uint8_t *initial_msg, size_t initial_len, uint8_t *digest) { uint8_t *dev_initial_msg = 0; uint8_t *dev_digest = 0; // Allocate GPU buffers for three vectors (two input, one output). checkCudaErrors(cudaMalloc((void**)&dev_k, k_size * sizeof(uint32_t))); checkCudaErrors(cudaMalloc((void**)&dev_r, r_size * sizeof(uint32_t))); checkCudaErrors(cudaMalloc((void**)&dev_digest, md5_size * sizeof(uint8_t))); checkCudaErrors(cudaMalloc((void**)&dev_initial_msg, initial_len * sizeof(uint8_t))); // Copy input vectors from host memory to GPU buffers. 
checkCudaErrors(cudaMemcpy(dev_initial_msg, initial_msg, initial_len * sizeof(uint8_t), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_k, cuda_k, k_size * sizeof(uint32_t), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_r, cuda_r, r_size * sizeof(uint32_t), cudaMemcpyHostToDevice)); // Launch a kernel on the GPU with one thread for each element. md5kernel<<<1, initial_len>>>(dev_initial_msg, initial_len, dev_digest, dev_k, dev_r); // Check for any errors launching the kernel cudaError_t cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "md5Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. checkCudaErrors(cudaDeviceSynchronize()); // Copy output vector from GPU buffer to host memory. checkCudaErrors(cudaMemcpy(digest, dev_digest, md5_size * sizeof(uint8_t), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(dev_digest)); checkCudaErrors(cudaFree(dev_initial_msg)); checkCudaErrors(cudaFree(dev_k)); checkCudaErrors(cudaFree(dev_r)); } }
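Two details of the MD5 pair above are worth spelling out. The launch md5kernel<<<1, initial_len>>> (hipLaunchKernelGGL in the .hip variant) starts one thread per input byte, but every thread pads and hashes the entire message and writes the same 16-byte digest, so the extra threads only duplicate work. The padding arithmetic follows directly from the new_len loop:

// initial_len = 3  ("abc"): new_len runs 4, 5, ... until new_len % 64 == 56  ->  new_len = 56,
//                           buffer = new_len + 8 = 64 bytes  ->  one 512-bit chunk
// initial_len = 56:          new_len starts at 57; the next value with % 64 == 56 is 120,
//                           buffer = 128 bytes  ->  two 512-bit chunks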
2bbaedec9bd96a22bd58aa69279884dee8cc3b96.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gtest/gtest.h> #include <ATen/hip/Atomic.cuh> #include <c10/test/util/Macros.h> #include <ATen/hip/HIPContext.h> #include <c10/hip/HIPException.h> #include <cmath> constexpr int blocksize = 256; constexpr int factor = 4; constexpr int arraysize = blocksize / factor; template <typename T> __global__ void addition_test_kernel(T * a, T * sum) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int idx = (tid) % arraysize; gpuAtomicAdd(&sum[idx], a[idx]); } template <typename T> __global__ void mul_test_kernel(T * a, T * sum) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int idx = (tid) % arraysize; gpuAtomicMul(&sum[idx], a[idx]); } template <typename T> __global__ void max_test_kernel(T * a, T * max) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int a_idx = (tid) % (arraysize * factor); int idx = a_idx / factor; gpuAtomicMax(&max[idx], a[a_idx]); } template <typename T> __global__ void min_test_kernel(T * a, T * min) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int a_idx = (tid) % (arraysize * factor); int idx = a_idx / factor; gpuAtomicMin(&min[idx], a[a_idx]); } template <typename T> void test_atomic_add() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); for (int i = 0; i < arraysize; ++i) { a[i] = 1; sum[i] = 0; answer[i] = factor; } hipMalloc((void**)&ad, arraysize * sizeof(T)); hipMalloc((void**)&sumd, arraysize * sizeof(T)); hipMemcpy(ad, a.data(), arraysize * sizeof(T), hipMemcpyHostToDevice); hipMemcpy(sumd, sum.data(), arraysize * sizeof(T), hipMemcpyHostToDevice); hipLaunchKernelGGL(( addition_test_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, sumd); C10_HIP_KERNEL_LAUNCH_CHECK(); hipMemcpy(sum.data(), sumd, arraysize * sizeof(T), hipMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } hipFree(ad); hipFree(sumd); } template <typename T> void test_atomic_mul() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); for (int i = 0; i < arraysize; ++i) { a[i] = 2; sum[i] = 2; answer[i] = pow(sum[i], static_cast<T>(factor + 1)); } hipMalloc((void**)&ad, arraysize * sizeof(T)); hipMalloc((void**)&sumd, arraysize * sizeof(T)); hipMemcpy(ad, a.data(), arraysize * sizeof(T), hipMemcpyHostToDevice); hipMemcpy(sumd, sum.data(), arraysize * sizeof(T), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mul_test_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, sumd); C10_HIP_KERNEL_LAUNCH_CHECK(); hipMemcpy(sum.data(), sumd, arraysize * sizeof(T), hipMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } hipFree(ad); hipFree(sumd); } template <typename T> void test_atomic_max() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize * factor); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); int j; for (int i = 0; i < arraysize * factor; ++i) { a[i] = i; if (i % factor == 0) { j = i / factor; sum[j] = std::numeric_limits<T>::lowest(); answer[j] = (j + 1) * factor - 1; } } hipMalloc((void**)&ad, arraysize * factor * sizeof(T)); hipMalloc((void**)&sumd, arraysize * sizeof(T)); hipMemcpy(ad, a.data(), arraysize * factor * sizeof(T), hipMemcpyHostToDevice); hipMemcpy(sumd, sum.data(), arraysize * sizeof(T), 
hipMemcpyHostToDevice); hipLaunchKernelGGL(( max_test_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, sumd); C10_HIP_KERNEL_LAUNCH_CHECK(); hipMemcpy(sum.data(), sumd, arraysize * sizeof(T), hipMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } hipFree(ad); hipFree(sumd); } template <typename T> void test_atomic_min() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize * factor); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); int j; for (int i = 0; i < arraysize * factor; ++i) { a[i] = i; if (i % factor == 0) { j = i / factor; sum[j] = std::numeric_limits<T>::max(); answer[j] = j * factor; } } hipMalloc((void**)&ad, arraysize * factor * sizeof(T)); hipMalloc((void**)&sumd, arraysize * sizeof(T)); hipMemcpy(ad, a.data(), arraysize * factor * sizeof(T), hipMemcpyHostToDevice); hipMemcpy(sumd, sum.data(), arraysize * sizeof(T), hipMemcpyHostToDevice); hipLaunchKernelGGL(( min_test_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, sumd); C10_HIP_KERNEL_LAUNCH_CHECK(); hipMemcpy(sum.data(), sumd, arraysize * sizeof(T), hipMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } hipFree(ad); hipFree(sumd); } TEST(TestAtomicOps, TestAtomicAdd) { if (!at::cuda::is_available()) return; test_atomic_add<uint8_t>(); test_atomic_add<int8_t>(); test_atomic_add<int16_t>(); test_atomic_add<int32_t>(); test_atomic_add<int64_t>(); test_atomic_add<at::BFloat16>(); test_atomic_add<at::Half>(); test_atomic_add<float>(); test_atomic_add<double>(); test_atomic_add<c10::complex<float> >(); test_atomic_add<c10::complex<double> >(); } TEST(TestAtomicOps, DISABLED_ON_WINDOWS(TestAtomicMul)) { if (!at::cuda::is_available()) return; test_atomic_mul<uint8_t>(); test_atomic_mul<int8_t>(); test_atomic_mul<int16_t>(); test_atomic_mul<int32_t>(); test_atomic_mul<int64_t>(); test_atomic_mul<at::BFloat16>(); test_atomic_mul<at::Half>(); test_atomic_mul<float>(); test_atomic_mul<double>(); } TEST(TestAtomicOps, DISABLED_ON_WINDOWS(TestAtomicMax)) { if (!at::cuda::is_available()) return; test_atomic_max<uint8_t>(); test_atomic_max<int8_t>(); test_atomic_max<int16_t>(); test_atomic_max<int32_t>(); test_atomic_max<int64_t>(); test_atomic_max<at::BFloat16>(); test_atomic_max<at::Half>(); test_atomic_max<float>(); test_atomic_max<double>(); } TEST(TestAtomicOps, DISABLED_ON_WINDOWS(TestAtomicMin)) { if (!at::cuda::is_available()) return; test_atomic_min<uint8_t>(); test_atomic_min<int8_t>(); test_atomic_min<int16_t>(); test_atomic_min<int32_t>(); test_atomic_min<int64_t>(); test_atomic_min<at::BFloat16>(); test_atomic_min<at::Half>(); test_atomic_min<float>(); test_atomic_min<double>(); }
2bbaedec9bd96a22bd58aa69279884dee8cc3b96.cu
#include <gtest/gtest.h> #include <ATen/cuda/Atomic.cuh> #include <c10/test/util/Macros.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAException.h> #include <cmath> constexpr int blocksize = 256; constexpr int factor = 4; constexpr int arraysize = blocksize / factor; template <typename T> __global__ void addition_test_kernel(T * a, T * sum) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int idx = (tid) % arraysize; gpuAtomicAdd(&sum[idx], a[idx]); } template <typename T> __global__ void mul_test_kernel(T * a, T * sum) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int idx = (tid) % arraysize; gpuAtomicMul(&sum[idx], a[idx]); } template <typename T> __global__ void max_test_kernel(T * a, T * max) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int a_idx = (tid) % (arraysize * factor); int idx = a_idx / factor; gpuAtomicMax(&max[idx], a[a_idx]); } template <typename T> __global__ void min_test_kernel(T * a, T * min) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int a_idx = (tid) % (arraysize * factor); int idx = a_idx / factor; gpuAtomicMin(&min[idx], a[a_idx]); } template <typename T> void test_atomic_add() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); for (int i = 0; i < arraysize; ++i) { a[i] = 1; sum[i] = 0; answer[i] = factor; } cudaMalloc((void**)&ad, arraysize * sizeof(T)); cudaMalloc((void**)&sumd, arraysize * sizeof(T)); cudaMemcpy(ad, a.data(), arraysize * sizeof(T), cudaMemcpyHostToDevice); cudaMemcpy(sumd, sum.data(), arraysize * sizeof(T), cudaMemcpyHostToDevice); addition_test_kernel<<<dimGrid, dimBlock>>>(ad, sumd); C10_CUDA_KERNEL_LAUNCH_CHECK(); cudaMemcpy(sum.data(), sumd, arraysize * sizeof(T), cudaMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } cudaFree(ad); cudaFree(sumd); } template <typename T> void test_atomic_mul() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); for (int i = 0; i < arraysize; ++i) { a[i] = 2; sum[i] = 2; answer[i] = pow(sum[i], static_cast<T>(factor + 1)); } cudaMalloc((void**)&ad, arraysize * sizeof(T)); cudaMalloc((void**)&sumd, arraysize * sizeof(T)); cudaMemcpy(ad, a.data(), arraysize * sizeof(T), cudaMemcpyHostToDevice); cudaMemcpy(sumd, sum.data(), arraysize * sizeof(T), cudaMemcpyHostToDevice); mul_test_kernel<<<dimGrid, dimBlock>>>(ad, sumd); C10_CUDA_KERNEL_LAUNCH_CHECK(); cudaMemcpy(sum.data(), sumd, arraysize * sizeof(T), cudaMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } cudaFree(ad); cudaFree(sumd); } template <typename T> void test_atomic_max() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize * factor); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); int j; for (int i = 0; i < arraysize * factor; ++i) { a[i] = i; if (i % factor == 0) { j = i / factor; sum[j] = std::numeric_limits<T>::lowest(); answer[j] = (j + 1) * factor - 1; } } cudaMalloc((void**)&ad, arraysize * factor * sizeof(T)); cudaMalloc((void**)&sumd, arraysize * sizeof(T)); cudaMemcpy(ad, a.data(), arraysize * factor * sizeof(T), cudaMemcpyHostToDevice); cudaMemcpy(sumd, sum.data(), arraysize * sizeof(T), cudaMemcpyHostToDevice); max_test_kernel<<<dimGrid, dimBlock>>>(ad, sumd); C10_CUDA_KERNEL_LAUNCH_CHECK(); cudaMemcpy(sum.data(), sumd, 
arraysize * sizeof(T), cudaMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } cudaFree(ad); cudaFree(sumd); } template <typename T> void test_atomic_min() { dim3 dimBlock(blocksize, 1); dim3 dimGrid(1, 1); T *ad, *sumd; std::vector<T> a(arraysize * factor); std::vector<T> sum(arraysize); std::vector<T> answer(arraysize); int j; for (int i = 0; i < arraysize * factor; ++i) { a[i] = i; if (i % factor == 0) { j = i / factor; sum[j] = std::numeric_limits<T>::max(); answer[j] = j * factor; } } cudaMalloc((void**)&ad, arraysize * factor * sizeof(T)); cudaMalloc((void**)&sumd, arraysize * sizeof(T)); cudaMemcpy(ad, a.data(), arraysize * factor * sizeof(T), cudaMemcpyHostToDevice); cudaMemcpy(sumd, sum.data(), arraysize * sizeof(T), cudaMemcpyHostToDevice); min_test_kernel<<<dimGrid, dimBlock>>>(ad, sumd); C10_CUDA_KERNEL_LAUNCH_CHECK(); cudaMemcpy(sum.data(), sumd, arraysize * sizeof(T), cudaMemcpyDeviceToHost); for (int i = 0; i < arraysize; ++i) { ASSERT_EQ(sum[i], answer[i]) << typeid(T).name(); } cudaFree(ad); cudaFree(sumd); } TEST(TestAtomicOps, TestAtomicAdd) { if (!at::cuda::is_available()) return; test_atomic_add<uint8_t>(); test_atomic_add<int8_t>(); test_atomic_add<int16_t>(); test_atomic_add<int32_t>(); test_atomic_add<int64_t>(); test_atomic_add<at::BFloat16>(); test_atomic_add<at::Half>(); test_atomic_add<float>(); test_atomic_add<double>(); test_atomic_add<c10::complex<float> >(); test_atomic_add<c10::complex<double> >(); } TEST(TestAtomicOps, DISABLED_ON_WINDOWS(TestAtomicMul)) { if (!at::cuda::is_available()) return; test_atomic_mul<uint8_t>(); test_atomic_mul<int8_t>(); test_atomic_mul<int16_t>(); test_atomic_mul<int32_t>(); test_atomic_mul<int64_t>(); test_atomic_mul<at::BFloat16>(); test_atomic_mul<at::Half>(); test_atomic_mul<float>(); test_atomic_mul<double>(); } TEST(TestAtomicOps, DISABLED_ON_WINDOWS(TestAtomicMax)) { if (!at::cuda::is_available()) return; test_atomic_max<uint8_t>(); test_atomic_max<int8_t>(); test_atomic_max<int16_t>(); test_atomic_max<int32_t>(); test_atomic_max<int64_t>(); test_atomic_max<at::BFloat16>(); test_atomic_max<at::Half>(); test_atomic_max<float>(); test_atomic_max<double>(); } TEST(TestAtomicOps, DISABLED_ON_WINDOWS(TestAtomicMin)) { if (!at::cuda::is_available()) return; test_atomic_min<uint8_t>(); test_atomic_min<int8_t>(); test_atomic_min<int16_t>(); test_atomic_min<int32_t>(); test_atomic_min<int64_t>(); test_atomic_min<at::BFloat16>(); test_atomic_min<at::Half>(); test_atomic_min<float>(); test_atomic_min<double>(); }
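The expected values in the atomics test pair follow from the constants at the top (blocksize = 256, factor = 4, so arraysize = 64): each output slot is touched by exactly factor threads.

// add: the 4 threads mapped to slot j each add 1 to an initial 0            ->  sum[j] = 4
// mul: slot j starts at 2 and is multiplied by 2 four times                 ->  sum[j] = 2^5 = 32
// max: slot j sees inputs {4j, 4j+1, 4j+2, 4j+3}                            ->  max[j] = 4j + 3
// min: the same four inputs against an initial numeric_limits<T>::max()     ->  min[j] = 4j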
a4cc00c870bc3145b4f7304e7baf2307dc1c9b54.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
using namespace std;

// Single-block tree reduction: at each step the surviving threads fold the
// element step_size slots to their right into their own slot.
__global__ void minimum(int *input)
{
    int step_size = 1;
    int tid = threadIdx.x;
    int num_threads = blockDim.x;
    while (num_threads > 0)
    {
        if (tid < num_threads)
        {
            int first = tid * step_size * 2;
            int second = first + step_size;
            if (input[second] < input[first])
                input[first] = input[second];
        }
        step_size *= 2;
        num_threads /= 2;
    }
}

__global__ void maximum(int *input)
{
    int step_size = 1;
    int tid = threadIdx.x;
    int num_threads = blockDim.x;
    while (num_threads > 0)
    {
        if (tid < num_threads)
        {
            int first = tid * step_size * 2;
            int second = first + step_size;
            if (input[second] > input[first])
                input[first] = input[second];
        }
        step_size *= 2;
        num_threads /= 2;
    }
}

int main()
{
    int n;
    cin >> n;
    srand(n);
    int a[n];
    int min = 20000;
    for (int i = 0; i < n; i++)
    {
        a[i] = rand() % 20000;
        if (a[i] < min)
            min = a[i];
        cout << a[i] << " ";
    }
    int size = n * sizeof(int);

    int *arr, result;
    hipMalloc(&arr, size);
    hipMemcpy(arr, a, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(minimum, dim3(1), dim3(n / 2), 0, 0, arr);
    hipMemcpy(&result, arr, sizeof(int), hipMemcpyDeviceToHost);
    cout << "The minimum is :- " << result;

    int *arr1, result1;
    hipMalloc(&arr1, size);
    hipMemcpy(arr1, a, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(maximum, dim3(1), dim3(n / 2), 0, 0, arr1);
    hipMemcpy(&result1, arr1, sizeof(int), hipMemcpyDeviceToHost);
    cout << "The maximum is :- " << result1;

    hipFree(arr);
    hipFree(arr1);
}
a4cc00c870bc3145b4f7304e7baf2307dc1c9b54.cu
code = """ #include<iostream> #include <cstdio> #include <cstdlib> #include<cuda_runtime.h> using namespace std; __global__ void minimum(int *input) { int step_size = 1; int tid = threadIdx.x; int num_threads = blockDim.x; while(num_threads > 0) { if(tid < num_threads) { int first = tid*step_size*2; int second = first+step_size; if(input[second] < input[first]) input[first] = input[second]; } step_size *= 2; num_threads /= 2; } } __global__ void maximum(int *input) { int step_size = 1; int tid = threadIdx.x; int num_threads = blockDim.x; while(num_threads > 0) { if(tid < num_threads) { int first = tid*step_size*2; int second = first+step_size; if(input[second] > input[first]) input[first] = input[second]; } step_size *= 2; num_threads /= 2; } } int main() { int n; cin>>n; srand(n); int a[n]; int min = 20000; for(int i=0;i<n;i++) { a[i] = rand()%20000; if(a[i] < min) min = a[i]; cout<<a[i]<<" "; } int size = n*sizeof(int); int *arr,result; cudaMalloc(&arr,size); cudaMemcpy(arr,a,size,cudaMemcpyHostToDevice); minimum<<<1,n/2>>>(arr); cudaMemcpy(&result,arr,sizeof(int),cudaMemcpyDeviceToHost); cout<<"The minimum is :- "<<result; int *arr1,result1; cudaMalloc(&arr1,size); cudaMemcpy(arr1,a,size,cudaMemcpyHostToDevice); maximum<<<1,n/2>>>(arr1); cudaMemcpy(&result1,arr1,sizeof(int),cudaMemcpyDeviceToHost); cout<<"The maximum is :- "<<result1; cudaFree(arr); cudaFree(arr1); } """