Dataset columns:
  hip_filename    string, length 5 to 84
  hip_content     string, length 79 to 9.69M
  cuda_filename   string, length 4 to 83
  cuda_content    string, length 19 to 9.69M
379972094b44f221ab1fb5c54d6823b60f8c46e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> static const int WORK_SIZE = 10; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __global__ void sort(int *a,int i,int n) { int tid = threadIdx.x; int p; int temp; if(i%2==0) { p=tid*2; if(a[p]>a[p+1]) { temp = a[p]; a[p] = a[p+1]; a[p+1] =temp; } } else { p=tid*2+1; if(p<n-1) { if(a[p]>a[p+1]) { temp = a[p]; a[p] = a[p+1]; a[p+1] =temp; } } } } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(void) { int a[WORK_SIZE]; int i; int *da; CUDA_CHECK_RETURN(hipMalloc((void**) &da, sizeof(int) * WORK_SIZE)); for(i=0;i<WORK_SIZE;i++) { printf("%d:",i); scanf("%d",&a[i]); } CUDA_CHECK_RETURN(hipMemcpy(da, a, sizeof(int) * WORK_SIZE, hipMemcpyHostToDevice)); for(i=0;i<WORK_SIZE;i++) { hipLaunchKernelGGL(( sort), dim3(1),dim3(WORK_SIZE/2), 0, 0, da,i,WORK_SIZE); } CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipMemcpy(a, da, sizeof(int) * WORK_SIZE, hipMemcpyDeviceToHost)); for(i=0;i<WORK_SIZE;i++) { printf("%d\t",a[i]); } printf("\n"); CUDA_CHECK_RETURN(hipFree((void*) da)); return 0; } Last login: Thu Mar 19 15:29:51 2015 from 10.80.0.68 echo $PWD'>' /bin/sh -c "cd \"/tmp/nsight-debug\";\"/tmp/nsight-debug/OddEvenSort\"";exit cuda-admin@cuda-admin:~$ echo $PWD'>' /home/cuda-admin> cuda-admin@cuda-admin:~$ /bin/sh -c "cd \"/tmp/nsight-debug\";\"/tmp/nsight-debu g/OddEvenSort\"";exit 0 : 5 5 1 : 6 6 2 : 9 9 3 : 7 7 4 : 1 1 5 : 3 3 6 : 8 8 7 : 4 4 8 : 2 2 9 : 0 0 0 1 2 3 4 5 6 7 8 9 logout
379972094b44f221ab1fb5c54d6823b60f8c46e0.cu
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> static const int WORK_SIZE = 10; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. */ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } __global__ void sort(int *a,int i,int n) { int tid = threadIdx.x; int p; int temp; if(i%2==0) { p=tid*2; if(a[p]>a[p+1]) { temp = a[p]; a[p] = a[p+1]; a[p+1] =temp; } } else { p=tid*2+1; if(p<n-1) { if(a[p]>a[p+1]) { temp = a[p]; a[p] = a[p+1]; a[p+1] =temp; } } } } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(void) { int a[WORK_SIZE]; int i; int *da; CUDA_CHECK_RETURN(cudaMalloc((void**) &da, sizeof(int) * WORK_SIZE)); for(i=0;i<WORK_SIZE;i++) { printf("%d:",i); scanf("%d",&a[i]); } CUDA_CHECK_RETURN(cudaMemcpy(da, a, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice)); for(i=0;i<WORK_SIZE;i++) { sort<<<1,WORK_SIZE/2>>>(da,i,WORK_SIZE); } CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete CUDA_CHECK_RETURN(cudaGetLastError()); CUDA_CHECK_RETURN(cudaMemcpy(a, da, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost)); for(i=0;i<WORK_SIZE;i++) { printf("%d\t",a[i]); } printf("\n"); CUDA_CHECK_RETURN(cudaFree((void*) da)); return 0; } Last login: Thu Mar 19 15:29:51 2015 from 10.80.0.68 echo $PWD'>' /bin/sh -c "cd \"/tmp/nsight-debug\";\"/tmp/nsight-debug/OddEvenSort\"";exit cuda-admin@cuda-admin:~$ echo $PWD'>' /home/cuda-admin> cuda-admin@cuda-admin:~$ /bin/sh -c "cd \"/tmp/nsight-debug\";\"/tmp/nsight-debu g/OddEvenSort\"";exit 0 : 5 5 1 : 6 6 2 : 9 9 3 : 7 7 4 : 1 1 5 : 3 3 6 : 8 8 7 : 4 4 8 : 2 2 9 : 0 0 0 1 2 3 4 5 6 7 8 9 logout
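Editorial note (not part of the dataset row above): the OddEvenSort pair illustrates the core rewrites hipify applies to a CUDA source file. The runtime API is renamed (cudaMalloc to hipMalloc, cudaMemcpy to hipMemcpy, the deprecated cudaThreadSynchronize to hipDeviceSynchronize, cudaFree to hipFree), and the triple-chevron launch kernel<<<grid, block>>>(args) becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args). The sketch below is a minimal, self-contained HIP program showing that mapping; the scale kernel is a hypothetical stand-in, not code taken from the record.

#include <hip/hip_runtime.h>
#include <cstdio>

// Hypothetical kernel used only to demonstrate the launch-syntax mapping.
__global__ void scale(int *data, int factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main(void) {
    const int n = 10;
    int host[n];
    for (int i = 0; i < n; ++i) host[i] = i;

    int *dev = nullptr;
    hipMalloc((void **)&dev, n * sizeof(int));                    // CUDA: cudaMalloc
    hipMemcpy(dev, host, n * sizeof(int), hipMemcpyHostToDevice); // CUDA: cudaMemcpy

    // CUDA launch:  scale<<<1, n>>>(dev, 2, n);
    // HIP launch emitted by hipify (grid, block, shared-mem bytes, stream, kernel args):
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, dev, 2, n);

    hipDeviceSynchronize();                                       // CUDA: cudaThreadSynchronize (deprecated)
    hipMemcpy(host, dev, n * sizeof(int), hipMemcpyDeviceToHost);
    hipFree(dev);                                                 // CUDA: cudaFree

    for (int i = 0; i < n; ++i) printf("%d ", host[i]);
    printf("\n");
    return 0;
}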
81aba7e00875965ea2bd63ce7f9aa427bc01eb0b.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> int main(int argc, char **argv) { int nElem = 1024; dim3 block (1024); dim3 grid ((nElem + block.x - 1) / block.x); printf("grid.x %d block.x %d \n", grid.x, block.x); block.x = 512; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); block.x = 256; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); block.x = 128; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); CHECK(hipDeviceReset()); return(0); }
81aba7e00875965ea2bd63ce7f9aa427bc01eb0b.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> int main(int argc, char **argv) { int nElem = 1024; dim3 block (1024); dim3 grid ((nElem + block.x - 1) / block.x); printf("grid.x %d block.x %d \n", grid.x, block.x); block.x = 512; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); block.x = 256; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); block.x = 128; grid.x = (nElem + block.x - 1) / block.x; printf("grid.x %d block.x %d \n", grid.x, block.x); CHECK(cudaDeviceReset()); return(0); }
34eeb455236ce550c9b078f6f16a256101ffb28a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "max_pooling.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *original_img = NULL; hipMalloc(&original_img, XSIZE*YSIZE); unsigned char *new_img = NULL; hipMalloc(&new_img, XSIZE*YSIZE); unsigned int width = 1; unsigned int num_thread = 1; unsigned int size = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( max_pooling), dim3(gridBlock),dim3(threadBlock), 0, 0, original_img,new_img,width,num_thread,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( max_pooling), dim3(gridBlock),dim3(threadBlock), 0, 0, original_img,new_img,width,num_thread,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( max_pooling), dim3(gridBlock),dim3(threadBlock), 0, 0, original_img,new_img,width,num_thread,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
34eeb455236ce550c9b078f6f16a256101ffb28a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "max_pooling.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *original_img = NULL; cudaMalloc(&original_img, XSIZE*YSIZE); unsigned char *new_img = NULL; cudaMalloc(&new_img, XSIZE*YSIZE); unsigned int width = 1; unsigned int num_thread = 1; unsigned int size = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); max_pooling<<<gridBlock,threadBlock>>>(original_img,new_img,width,num_thread,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { max_pooling<<<gridBlock,threadBlock>>>(original_img,new_img,width,num_thread,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { max_pooling<<<gridBlock,threadBlock>>>(original_img,new_img,width,num_thread,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
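Editorial note: the benchmark above performs 10 untimed warm-up launches, then times 1000 launches of max_pooling with std::chrono::steady_clock and reports microseconds per block/matrix configuration. Because there is no device synchronization inside the timed region, it measures launch (enqueue) overhead rather than kernel execution time. A stripped-down sketch of that timing pattern follows, using a hypothetical empty kernel since max_pooling.cu itself is not part of this row:

#include <hip/hip_runtime.h>
#include <chrono>
#include <iostream>

__global__ void dummyKernel(int) {}   // hypothetical stand-in for max_pooling

int main(void) {
    using namespace std::chrono;
    dim3 grid(32, 32), block(8, 8);

    // Warm-up launches, excluded from timing (mirrors the benchmark above).
    for (int i = 0; i < 10; ++i)
        hipLaunchKernelGGL(dummyKernel, grid, block, 0, 0, 0);
    hipDeviceSynchronize();

    auto start = steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        hipLaunchKernelGGL(dummyKernel, grid, block, 0, 0, 0);
    auto end = steady_clock::now();   // as in the record: no sync here, so this times enqueue only

    auto usecs = duration_cast<duration<float, microseconds::period>>(end - start);
    std::cout << usecs.count() << " us for 1000 launches" << std::endl;
    return 0;
}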
fef834f5ab806df50b793097e8ff405d8e07b197.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_vector.h> #include <thrust/random.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <iostream> #include <fmt/format.h> #include <ctime> #include <cmath> const float SOFTENING = 1e-9f; const int BLOCK_SIZE = 256; /** * Struct representing a body */ struct Body { float3 position, velocity; float mass; __host__ __device__ Body(float3 _position, float3 _velocity, float _mass): position(_position), velocity(_velocity), mass(_mass) {}; __host__ __device__ Body() : position(float3{0., 0., 0.}), velocity(float3{0., 0., 0.}), mass{1.} {}; }; /** * Image Coordinates */ struct ImageCoord { int x, y, imageDim; __host__ __device__ // create coordinate from components ImageCoord(int _x, int _y, int _imageDim) : x(_x), y(_y), imageDim(_imageDim) { }; __host__ __device__ // default initialization ImageCoord() : x(0), y(0), imageDim(0) { }; __host__ __device__ // create image coordinate from a body's position ImageCoord(const Body &body, int _imageDim) : imageDim(_imageDim) { auto halfImageDim = imageDim / 2; x = (body.position.x + 1.) * halfImageDim; y = (body.position.y + 1.) * halfImageDim; }; __host__ __device__ int toOffset() { return x + y * imageDim; }; }; /** * Program to initialize bodies with random values between -1,1 */ struct initRandomPrg { float minValue, maxValue; int seed; __host__ __device__ initRandomPrg(int _seed = 0, float _mnV = -1.f, float _mxV = 1.f) : seed(_seed), minValue(_mnV), maxValue(_mxV) { }; __host__ __device__ Body operator()(const unsigned int idx) const { thrust::default_random_engine rng(seed); thrust::uniform_real_distribution<float> dist(minValue, maxValue); rng.discard(idx); return Body { float3{dist(rng), dist(rng), dist(rng)}, float3{dist(rng), dist(rng),dist(rng)}, dist(rng) }; } }; __device__ float3 bodyInteraction(Body &bi, Body &bj, float3 accel) { float3 r; r.x = bi.position.x - bj.position.x; r.y = bi.position.y - bj.position.y; r.z = bi.position.z - bj.position.z; auto distSqr = r.x * r.x + r.y * r.y + r.z * r.z + SOFTENING; auto distSixth = distSqr * distSqr * distSqr; auto invDistCube = 1.0f / sqrtf(distSixth); auto s = bj.mass * invDistCube; accel.x += r.x * s; accel.y += r.y * s; accel.z += r.z * s; return accel; } __global__ void updateVelocities(Body *bodies, int bodyCount, float dt) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (bodyCount <= idx) return; float3 accel{0., 0., 0.}; auto body = bodies[idx]; for(int tile = 0; tile < gridDim.x; tile++) { // __shared__ Body sharedBodies[BLOCK_SIZE]; // Body tBody = bodies[tile * blockDim.x + threadIdx.x]; // sharedBodies[threadIdx.x] = Body{tBody.position, tBody.velocity, tBody.mass}; // __syncthreads(); for(int j = 0; j < BLOCK_SIZE; j++) { // accel = bodyInteraction(body, sharedBodies[j], accel); accel = bodyInteraction(body, bodies[j], accel); } // __syncthreads(); } // update blockBody velocity body.velocity.x += accel.x * dt; body.velocity.y += accel.y * dt; body.velocity.z += accel.z * dt; } int main(int argc, char **argv) { int const BODY_COUNT = 10000; const float dt = 0.01f; int numBlocks = (BODY_COUNT + BLOCK_SIZE - 1) / BLOCK_SIZE; // initialize bodies auto bodies = thrust::device_vector<Body>(BODY_COUNT); auto bodies_ptr = thrust::raw_pointer_cast(bodies.data()); auto index_sequence_begin = thrust::counting_iterator<unsigned int>(0); thrust::transform(index_sequence_begin, index_sequence_begin + BODY_COUNT, bodies.begin(), 
initRandomPrg(std::time(0))); std::cout << fmt::format("Initialize Bodies: {}\n\n", BODY_COUNT); // calculate forces index_sequence_begin = thrust::counting_iterator<unsigned int>(0); auto forces = thrust::device_vector<float3>(BODY_COUNT); auto forces_ptr = thrust::raw_pointer_cast(forces.data()); auto h_forces = thrust::host_vector<float3>(BODY_COUNT); std::cout << "Calculate Velocities" << "\n\n"; hipLaunchKernelGGL(( updateVelocities), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, bodies_ptr, BODY_COUNT, dt); hipDeviceSynchronize(); std::cout << "Return" << "\n\n"; return 0; }
fef834f5ab806df50b793097e8ff405d8e07b197.cu
#include <thrust/device_vector.h> #include <thrust/random.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <iostream> #include <fmt/format.h> #include <ctime> #include <cmath> const float SOFTENING = 1e-9f; const int BLOCK_SIZE = 256; /** * Struct representing a body */ struct Body { float3 position, velocity; float mass; __host__ __device__ Body(float3 _position, float3 _velocity, float _mass): position(_position), velocity(_velocity), mass(_mass) {}; __host__ __device__ Body() : position(float3{0., 0., 0.}), velocity(float3{0., 0., 0.}), mass{1.} {}; }; /** * Image Coordinates */ struct ImageCoord { int x, y, imageDim; __host__ __device__ // create coordinate from components ImageCoord(int _x, int _y, int _imageDim) : x(_x), y(_y), imageDim(_imageDim) { }; __host__ __device__ // default initialization ImageCoord() : x(0), y(0), imageDim(0) { }; __host__ __device__ // create image coordinate from a body's position ImageCoord(const Body &body, int _imageDim) : imageDim(_imageDim) { auto halfImageDim = imageDim / 2; x = (body.position.x + 1.) * halfImageDim; y = (body.position.y + 1.) * halfImageDim; }; __host__ __device__ int toOffset() { return x + y * imageDim; }; }; /** * Program to initialize bodies with random values between -1,1 */ struct initRandomPrg { float minValue, maxValue; int seed; __host__ __device__ initRandomPrg(int _seed = 0, float _mnV = -1.f, float _mxV = 1.f) : seed(_seed), minValue(_mnV), maxValue(_mxV) { }; __host__ __device__ Body operator()(const unsigned int idx) const { thrust::default_random_engine rng(seed); thrust::uniform_real_distribution<float> dist(minValue, maxValue); rng.discard(idx); return Body { float3{dist(rng), dist(rng), dist(rng)}, float3{dist(rng), dist(rng),dist(rng)}, dist(rng) }; } }; __device__ float3 bodyInteraction(Body &bi, Body &bj, float3 accel) { float3 r; r.x = bi.position.x - bj.position.x; r.y = bi.position.y - bj.position.y; r.z = bi.position.z - bj.position.z; auto distSqr = r.x * r.x + r.y * r.y + r.z * r.z + SOFTENING; auto distSixth = distSqr * distSqr * distSqr; auto invDistCube = 1.0f / sqrtf(distSixth); auto s = bj.mass * invDistCube; accel.x += r.x * s; accel.y += r.y * s; accel.z += r.z * s; return accel; } __global__ void updateVelocities(Body *bodies, int bodyCount, float dt) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (bodyCount <= idx) return; float3 accel{0., 0., 0.}; auto body = bodies[idx]; for(int tile = 0; tile < gridDim.x; tile++) { // __shared__ Body sharedBodies[BLOCK_SIZE]; // Body tBody = bodies[tile * blockDim.x + threadIdx.x]; // sharedBodies[threadIdx.x] = Body{tBody.position, tBody.velocity, tBody.mass}; // __syncthreads(); for(int j = 0; j < BLOCK_SIZE; j++) { // accel = bodyInteraction(body, sharedBodies[j], accel); accel = bodyInteraction(body, bodies[j], accel); } // __syncthreads(); } // update blockBody velocity body.velocity.x += accel.x * dt; body.velocity.y += accel.y * dt; body.velocity.z += accel.z * dt; } int main(int argc, char **argv) { int const BODY_COUNT = 10000; const float dt = 0.01f; int numBlocks = (BODY_COUNT + BLOCK_SIZE - 1) / BLOCK_SIZE; // initialize bodies auto bodies = thrust::device_vector<Body>(BODY_COUNT); auto bodies_ptr = thrust::raw_pointer_cast(bodies.data()); auto index_sequence_begin = thrust::counting_iterator<unsigned int>(0); thrust::transform(index_sequence_begin, index_sequence_begin + BODY_COUNT, bodies.begin(), initRandomPrg(std::time(0))); std::cout << fmt::format("Initialize Bodies: {}\n\n", BODY_COUNT); // 
calculate forces index_sequence_begin = thrust::counting_iterator<unsigned int>(0); auto forces = thrust::device_vector<float3>(BODY_COUNT); auto forces_ptr = thrust::raw_pointer_cast(forces.data()); auto h_forces = thrust::host_vector<float3>(BODY_COUNT); std::cout << "Calculate Velocities" << "\n\n"; updateVelocities<<<numBlocks, BLOCK_SIZE>>>(bodies_ptr, BODY_COUNT, dt); cudaDeviceSynchronize(); std::cout << "Return" << "\n\n"; return 0; }
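Editorial note on the n-body pair above: in updateVelocities, the statement auto body = bodies[idx]; makes a register-local copy of the Body, so the velocity increments computed at the end of the kernel are never stored back to global memory (and the host never copies the results back either). The fragment below sketches the write-back such a kernel would normally end with; it is an editorial addition, not code from the record, and it redeclares a Body struct with the same layout so it stands alone:

#include <hip/hip_runtime.h>   // provides float3 and __global__; the CUDA side of the record uses the CUDA runtime instead

struct Body { float3 position, velocity; float mass; };   // same layout as the record's Body

// Sketch only: apply a precomputed acceleration and persist the result.
__global__ void applyVelocityUpdate(Body *bodies, int bodyCount, float dt, float3 accel) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= bodyCount) return;
    Body body = bodies[idx];          // local copy, as in the record
    body.velocity.x += accel.x * dt;
    body.velocity.y += accel.y * dt;
    body.velocity.z += accel.z * dt;
    bodies[idx] = body;               // the write-back the record's kernel omits
}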
6110ab2bc57347d1e9258fe32c72d80b8c676db4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Mark Gates @generated from magmablas/zlanhe.cu normal z -> d, Tue Feb 9 16:05:29 2016 */ #include "magma_internal.h" #include "magma_templates.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_d #define REAL /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void dlansy_inf_kernel_lower( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; double res = 0.; __shared__ double la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_D_MAKE( MAGMA_D_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_D_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_D_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for (int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_D_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for (int j=0; j < tx; j++) { res += MAGMA_D_ABS( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_D_REAL( *A )); A += 1; // sum column below diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_D_ABS( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_D_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void dlansy_inf_kernel_upper( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; double res = 0.; __shared__ double la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_D_MAKE( MAGMA_D_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_D_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_D_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for (int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_D_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for (int j=0; j < tx; j++) { res += MAGMA_D_ABS( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_D_REAL( *A )); A += lda; //# // sum #row right of diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_D_ABS( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_D_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void dlansy_inf( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr dwork, magma_queue_t queue ) { dim3 threads( inf_bs, 4 ); dim3 grid( magma_ceildiv( n, inf_bs ), 1 ); magma_int_t n_full_block = (n - n % inf_bs) / inf_bs; magma_int_t n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { hipLaunchKernelGGL(( dlansy_inf_kernel_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork, n_full_block, n_mod_bs ); } else { hipLaunchKernelGGL(( dlansy_inf_kernel_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork, n_full_block, n_mod_bs ); } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void dlansy_max_kernel_lower( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; double res = 0; if (ind < n) { A += ind; for (int j=0; j < ind; ++j) { res = max_nan( res, MAGMA_D_ABS( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_D_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void dlansy_max_kernel_upper( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; double res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for (int j=n-1; j > ind; j--) { res = max_nan( res, MAGMA_D_ABS( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_D_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void dlansy_max( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr dwork, magma_queue_t queue ) { dim3 threads( max_bs ); dim3 grid( magma_ceildiv( n, max_bs ) ); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( dlansy_max_kernel_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork ); } else { hipLaunchKernelGGL(( dlansy_max_kernel_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork ); } } /* ====================================================================== */ /** Purpose ------- DLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. DLANSY = ( max(abs(A(i,j))), NORM = MagmaMaxNorm ( ( norm1(A), NORM = MagmaOneNorm ( ( normI(A), NORM = MagmaInfNorm ( ( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. On error, returns DLANSY < 0: if DLANSY = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm magma_norm_t Specifies the value to be returned in DLANSY as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, DLANSY is set to zero. @param[in] dA DOUBLE PRECISION array on the GPU, dimension (LDDA,N) The symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(N,1). @param dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires WORK. @param[in] lwork INTEGER The dimension of the array DWORK. LWORK >= max( 1, N ). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" double magmablas_dlansy_q( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dwork, magma_int_t lwork, magma_queue_t queue ) { magma_int_t info = 0; // 1-norm == inf-norm since A is symmetric bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < n ) info = -5; else if ( lwork < n ) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; double res = 0; if ( inf_norm ) { dlansy_inf( uplo, n, dA, ldda, dwork, queue ); } else { dlansy_max( uplo, n, dA, ldda, dwork, queue ); } hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork ); magma_dgetvector( 1, &dwork[0], 1, &res, 1, queue ); return res; }
6110ab2bc57347d1e9258fe32c72d80b8c676db4.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Mark Gates @generated from magmablas/zlanhe.cu normal z -> d, Tue Feb 9 16:05:29 2016 */ #include "magma_internal.h" #include "magma_templates.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_d #define REAL /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void dlansy_inf_kernel_lower( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; double res = 0.; __shared__ double la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_D_MAKE( MAGMA_D_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_D_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_D_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for (int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_D_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for (int j=0; j < tx; j++) { res += MAGMA_D_ABS( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_D_REAL( *A )); A += 1; // sum column below diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_D_ABS( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_D_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void dlansy_inf_kernel_upper( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; double res = 0.; __shared__ double la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_D_MAKE( MAGMA_D_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_D_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_D_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_D_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for (int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_D_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for (int j=0; j < tx; j++) { res += MAGMA_D_ABS( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_D_REAL( *A )); A += lda; //# // sum #row right of diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_D_ABS( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_D_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_D_REAL( la[tx][1] ) + MAGMA_D_REAL( la[tx][2] ) + MAGMA_D_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void dlansy_inf( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr dwork, magma_queue_t queue ) { dim3 threads( inf_bs, 4 ); dim3 grid( magma_ceildiv( n, inf_bs ), 1 ); magma_int_t n_full_block = (n - n % inf_bs) / inf_bs; magma_int_t n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { dlansy_inf_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } else { dlansy_inf_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void dlansy_max_kernel_lower( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; double res = 0; if (ind < n) { A += ind; for (int j=0; j < ind; ++j) { res = max_nan( res, MAGMA_D_ABS( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_D_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void dlansy_max_kernel_upper( int n, const double * __restrict__ A, int lda, double * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; double res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for (int j=n-1; j > ind; j--) { res = max_nan( res, MAGMA_D_ABS( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_D_REAL( *A ))); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void dlansy_max( magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr A, magma_int_t lda, magmaDouble_ptr dwork, magma_queue_t queue ) { dim3 threads( max_bs ); dim3 grid( magma_ceildiv( n, max_bs ) ); if ( uplo == MagmaLower ) { dlansy_max_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork ); } else { dlansy_max_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork ); } } /* ====================================================================== */ /** Purpose ------- DLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. DLANSY = ( max(abs(A(i,j))), NORM = MagmaMaxNorm ( ( norm1(A), NORM = MagmaOneNorm ( ( normI(A), NORM = MagmaInfNorm ( ( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. On error, returns DLANSY < 0: if DLANSY = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm magma_norm_t Specifies the value to be returned in DLANSY as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, DLANSY is set to zero. @param[in] dA DOUBLE PRECISION array on the GPU, dimension (LDDA,N) The symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(N,1). @param dwork (workspace) DOUBLE PRECISION array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires WORK. @param[in] lwork INTEGER The dimension of the array DWORK. LWORK >= max( 1, N ). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" double magmablas_dlansy_q( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dwork, magma_int_t lwork, magma_queue_t queue ) { magma_int_t info = 0; // 1-norm == inf-norm since A is symmetric bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < n ) info = -5; else if ( lwork < n ) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; double res = 0; if ( inf_norm ) { dlansy_inf( uplo, n, dA, ldda, dwork, queue ); } else { dlansy_max( uplo, n, dA, ldda, dwork, queue ); } magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork ); magma_dgetvector( 1, &dwork[0], 1, &res, 1, queue ); return res; }
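Editorial note: the MAGMA pair above shows how hipify handles launches that carry an explicit shared-memory size and stream: kernel<<<grid, threads, 0, queue->cuda_stream()>>>(args) becomes hipLaunchKernelGGL(kernel, grid, threads, 0, queue->cuda_stream(), args), with the two extra launch parameters kept in the same argument positions. The sketch below shows that rewrite on a hypothetical column-major row-sum kernel (not MAGMA's dlansy kernels) launched on a caller-supplied stream:

#include <hip/hip_runtime.h>
#include <cmath>

// Hypothetical kernel: row sums of |A(i,j)| for a column-major n x n matrix with leading dimension lda.
__global__ void rowSums(int n, const double *A, int lda, double *out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    double s = 0.0;
    for (int j = 0; j < n; ++j)
        s += fabs(A[i + (size_t)j * lda]);
    out[i] = s;
}

void launchRowSums(int n, const double *dA, int lda, double *dOut, hipStream_t stream) {
    dim3 threads(64);
    dim3 grid((n + threads.x - 1) / threads.x);
    // CUDA original:  rowSums<<<grid, threads, 0, stream>>>(n, dA, lda, dOut);
    // hipify output (shared-mem bytes and stream stay in the same slots):
    hipLaunchKernelGGL(rowSums, grid, threads, 0, stream, n, dA, lda, dOut);
}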
8f44fcea94b5b9e39c559a0ade74590915a744d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ********************************************** * CS314 Principles of Programming Languages * * Fall 2020 * ********************************************** */ #include "utils.hpp" #include "gpuHeaders.cuh" #include "extra.cu" #include <iostream> using namespace std; #define threadsPerBlock 256 int one_way_handshake(GraphData graph, int *& matches, int numthreads, bool extra_credit) { int num_thread_blocks = (numthreads + threadsPerBlock - 1) / threadsPerBlock; int numVertices = graph.numNodes; int numEdges = graph.numEdges; //Prepare various GPU arrays that we're going to need: int * strongNeighbor_gpu;//will hold strongest neighbor for each vertex int * matches_gpu;//will hold the output int * src_gpu;//holds the src nodes in edge list int * dst_gpu;//holds the dst nodes in edge list int * weight_gpu;//holds the edge weights in edge list int * temp1_gpu;//a temporary array for data we don't need to keep for long int * temp2_gpu;//a temporary array for data we don't need to keep for long int * temp3_gpu;//a temporary array for data we don't need to keep for long int * temp4_gpu;//a temporary array for data we don't need to keep for long /** YOUR CODE GOES BELOW (allocate GPU memory, and copy from CPU to GPU as appropriate **/ // Allocate strongNeighbor_gpu size on the GPU hipMalloc((void**)&strongNeighbor_gpu, sizeof(int) * numVertices); // Allocate and copy data to gpu for matches hipMalloc((void**)&matches_gpu, sizeof(int) * numVertices); hipMemcpy(matches_gpu, matches, sizeof(int) * numVertices, hipMemcpyHostToDevice); // Allocate and copy data to gpu for src hipMalloc((void**)&src_gpu, sizeof(int) * numEdges); hipMemcpy(src_gpu, graph.src, sizeof(int) * numEdges, hipMemcpyHostToDevice); // Allocate and copy data to gpu for dst hipMalloc((void**)&dst_gpu, sizeof(int) * numEdges); hipMemcpy(dst_gpu, graph.dst, sizeof(int) * numEdges, hipMemcpyHostToDevice); // Allocate and copy data to gpu for weight hipMalloc((void**)&weight_gpu, sizeof(int) * numEdges); hipMemcpy(weight_gpu, graph.weight, sizeof(int) * numEdges, hipMemcpyHostToDevice); // Allocate temp1 to gpu hipMalloc((void**)&temp1_gpu, sizeof(int) * (numEdges + 1)); // Allocate temp2 to gpu hipMalloc((void**)&temp2_gpu, sizeof(int) * (numEdges + 1)); // Allocate temp3 to gpu hipMalloc((void**)&temp3_gpu, sizeof(int) * (numEdges + 1)); // Allocate temp4 to gpu hipMalloc((void**)&temp4_gpu, sizeof(int) * (numEdges + 1)); /** YOUR CODE GOES ABOVE **/ //matching loop int iter; for (iter = 0; ; iter++) { if(extra_credit) { /** YOUR CODE GOES BELOW (extra credit) **/ /** YOUR CODE GOES ABOVE (extra credit) **/ } else { //Step 1: Get strongest neighbor for each vertex/node int * strongNeighbor_cpu = (int *) malloc(sizeof(int) * numVertices); int * strongNeighborWeight_cpu = (int *) malloc(sizeof(int) * numVertices); for(int x = 0; x < numVertices; x++) { strongNeighbor_cpu[x] = -1; } for(int x = 0; x < numEdges; x++) { int src = graph.src[x]; int dst = graph.dst[x]; int wgt = graph.weight[x]; //std::cerr << src << "->" << dst << ": " << wgt << "\n"; if(strongNeighbor_cpu[src] == -1 || strongNeighborWeight_cpu[src] < wgt) { strongNeighbor_cpu[src] = dst; strongNeighborWeight_cpu[src] = wgt; } } //move data from CPU to GPU, and free the CPU arrays hipMemcpy(strongNeighbor_gpu, strongNeighbor_cpu, numVertices * sizeof(int), hipMemcpyHostToDevice); free(strongNeighbor_cpu); free(strongNeighborWeight_cpu); } //Step 2: check for each vertex whether there's 
a handshake hipLaunchKernelGGL(( check_handshaking_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, strongNeighbor_gpu, matches_gpu, numVertices); // DEBUG /* printf("\nmatches"); int * temp7 = (int *) malloc(sizeof(int) * (numVertices)); hipMemcpy(temp7, matches_gpu, sizeof(int) * (numVertices), hipMemcpyDeviceToHost); for (int i = 0; i < numVertices; i++) { printf(" %d ", temp7[i]); } */ //Step 3: filter //Step 3a: decide which edges to keep (marked with a 1) versus filter (marked with a 0) int * keepEdges_gpu = temp1_gpu; temp1_gpu = NULL; hipLaunchKernelGGL(( markFilterEdges_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, src_gpu, dst_gpu, matches_gpu, keepEdges_gpu, numEdges); // DEBUG /* printf("\nkeepEdges"); int * temp6 = (int *) malloc(sizeof(int) * (numEdges + 1)); hipMemcpy(temp6, keepEdges_gpu, sizeof(int) * (numEdges + 1), hipMemcpyDeviceToHost); for (int i = 0; i < numEdges + 1; i++) { printf(" %d ", temp6[i]); } */ //Step 3b: get new indices (in edge list for next iteration) of the edges we're going to keep int * newEdgeLocs_gpu = keepEdges_gpu; keepEdges_gpu = NULL; for(int distance = 0; distance <= numEdges; distance = max(1, distance * 2)) { hipLaunchKernelGGL(( exclusive_prefix_sum_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, newEdgeLocs_gpu, temp2_gpu, distance, numEdges+1); swapArray((void**) &newEdgeLocs_gpu, (void**) &temp2_gpu); } //note: temp1 is still in use, until we're done with newEdgeLocs_gpu // DEBUG /* printf("\nnewEdgeLocs"); int * temp5 = (int *) malloc(sizeof(int) * (numEdges + 1)); hipMemcpy(temp5, newEdgeLocs_gpu, sizeof(int) * (numEdges + 1), hipMemcpyDeviceToHost); for (int i = 0; i < numEdges + 1; i++) { printf(" %d ", temp5[i]); } */ //Step 3c: check if we're done matching int lastLoc = 0; hipMemcpy(&lastLoc, &(newEdgeLocs_gpu[numEdges]), sizeof(int), hipMemcpyDeviceToHost); if(lastLoc < 2) { //termination: fewer than two nodes remain unmatched break; } else if(lastLoc == numEdges) { //termination: no additional matches are possible break; } //Step 3d: pack the src, dst, and weight arrays in accordance with new edge locations hipLaunchKernelGGL(( packGraph_gpu), dim3(num_thread_blocks), dim3(threadsPerBlock), 0, 0, temp2_gpu, src_gpu, temp3_gpu, dst_gpu, temp4_gpu, weight_gpu, newEdgeLocs_gpu, numEdges); swapArray((void**) &temp2_gpu, (void**) &src_gpu); swapArray((void**) &temp3_gpu, (void**) &dst_gpu); swapArray((void**) &temp4_gpu, (void**) &weight_gpu); temp1_gpu = newEdgeLocs_gpu; newEdgeLocs_gpu = NULL; //note: now we're done with the current contents of all the temporary arrays //Set new number of edges: numEdges = lastLoc; if(iter > numVertices) { cerr << "Error: matching has been running too long; breaking loop now\n"; break; } if(!extra_credit) { //Step 4: Copy new graph arrays to CPU hipMemcpy(graph.src, src_gpu, numEdges * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(graph.dst, dst_gpu, numEdges * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(graph.weight, weight_gpu, numEdges * sizeof(int), hipMemcpyDeviceToHost); } } hipMemcpy(matches, matches_gpu, numVertices * sizeof(int), hipMemcpyDeviceToHost); //Wait until pending GPU operations are complete: hipDeviceSynchronize(); //free GPU arrays /** YOUR CODE GOES BELOW **/ // Free strongNeighbor_gpu hipFree(strongNeighbor_gpu); // Free matches_gpu hipFree(matches_gpu); // Free src_gpu hipFree(src_gpu); // Free dst_gpu hipFree(dst_gpu); // Free weight_gpu hipFree(weight_gpu); // Free temp1, temp2, temp3, temp4 hipFree(temp1_gpu); hipFree(temp2_gpu); 
    hipFree(temp3_gpu);
    hipFree(temp4_gpu);
    /** YOUR CODE GOES ABOVE **/

    hipError_t hipError_t;
    hipError_t = hipGetLastError();
    if(hipError_t != hipSuccess) {
        cerr << "Warning: one or more CUDA errors occurred. Try using cuda-gdb to debug. Error message: \n\t" << hipGetErrorString(hipError_t) << "\n";
    }

    return iter + 1;
}

void one_way_handshake_wrapper(GraphData graph, int *& matches, int numthreads, bool extra_credit)
{
    fprintf(stderr, "Start One Way Matching ... \n");

    struct timeval beginTime, endTime;
    setTime(&beginTime);

    int iter = one_way_handshake(graph, matches, numthreads, extra_credit);

    setTime(&endTime);

    fprintf(stderr, "Done matching.\n");
    fprintf(stderr, "Performed matching for %d iterations\n", iter);
    fprintf(stderr, "One Way Handshaking Matching Time: %.2f ms\n", getTime(&beginTime, &endTime));
}
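The CUDA original of this file follows. As a quick orientation to the pair, the sketch below shows the one mechanical change hipify makes to kernel launches in it: the triple-chevron launch in the CUDA source becomes an explicit hipLaunchKernelGGL call with grid, block, shared-memory and stream arguments. The names scale_kernel and launch_scale are stand-ins introduced here, not part of either file.

#include <cuda_runtime.h>

// Stand-in kernel; only the launch syntax below is the point.
__global__ void scale_kernel(int *data, int n, int factor)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launch_scale(int *d_data, int n)
{
    int threads = 256;
    int blocks  = (n + threads - 1) / threads;

    // CUDA source form (as in the .cu file below):
    scale_kernel<<<blocks, threads>>>(d_data, n, 2);

    // hipify rewrites the launch above into the form seen in the .hip file:
    //   hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, 0, d_data, n, 2);
    // where the two extra arguments are dynamic shared-memory bytes and the stream.
}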
8f44fcea94b5b9e39c559a0ade74590915a744d8.cu
/* ********************************************** * CS314 Principles of Programming Languages * * Fall 2020 * ********************************************** */ #include "utils.hpp" #include "gpuHeaders.cuh" #include "extra.cu" #include <iostream> using namespace std; #define threadsPerBlock 256 int one_way_handshake(GraphData graph, int *& matches, int numthreads, bool extra_credit) { int num_thread_blocks = (numthreads + threadsPerBlock - 1) / threadsPerBlock; int numVertices = graph.numNodes; int numEdges = graph.numEdges; //Prepare various GPU arrays that we're going to need: int * strongNeighbor_gpu;//will hold strongest neighbor for each vertex int * matches_gpu;//will hold the output int * src_gpu;//holds the src nodes in edge list int * dst_gpu;//holds the dst nodes in edge list int * weight_gpu;//holds the edge weights in edge list int * temp1_gpu;//a temporary array for data we don't need to keep for long int * temp2_gpu;//a temporary array for data we don't need to keep for long int * temp3_gpu;//a temporary array for data we don't need to keep for long int * temp4_gpu;//a temporary array for data we don't need to keep for long /** YOUR CODE GOES BELOW (allocate GPU memory, and copy from CPU to GPU as appropriate **/ // Allocate strongNeighbor_gpu size on the GPU cudaMalloc((void**)&strongNeighbor_gpu, sizeof(int) * numVertices); // Allocate and copy data to gpu for matches cudaMalloc((void**)&matches_gpu, sizeof(int) * numVertices); cudaMemcpy(matches_gpu, matches, sizeof(int) * numVertices, cudaMemcpyHostToDevice); // Allocate and copy data to gpu for src cudaMalloc((void**)&src_gpu, sizeof(int) * numEdges); cudaMemcpy(src_gpu, graph.src, sizeof(int) * numEdges, cudaMemcpyHostToDevice); // Allocate and copy data to gpu for dst cudaMalloc((void**)&dst_gpu, sizeof(int) * numEdges); cudaMemcpy(dst_gpu, graph.dst, sizeof(int) * numEdges, cudaMemcpyHostToDevice); // Allocate and copy data to gpu for weight cudaMalloc((void**)&weight_gpu, sizeof(int) * numEdges); cudaMemcpy(weight_gpu, graph.weight, sizeof(int) * numEdges, cudaMemcpyHostToDevice); // Allocate temp1 to gpu cudaMalloc((void**)&temp1_gpu, sizeof(int) * (numEdges + 1)); // Allocate temp2 to gpu cudaMalloc((void**)&temp2_gpu, sizeof(int) * (numEdges + 1)); // Allocate temp3 to gpu cudaMalloc((void**)&temp3_gpu, sizeof(int) * (numEdges + 1)); // Allocate temp4 to gpu cudaMalloc((void**)&temp4_gpu, sizeof(int) * (numEdges + 1)); /** YOUR CODE GOES ABOVE **/ //matching loop int iter; for (iter = 0; ; iter++) { if(extra_credit) { /** YOUR CODE GOES BELOW (extra credit) **/ /** YOUR CODE GOES ABOVE (extra credit) **/ } else { //Step 1: Get strongest neighbor for each vertex/node int * strongNeighbor_cpu = (int *) malloc(sizeof(int) * numVertices); int * strongNeighborWeight_cpu = (int *) malloc(sizeof(int) * numVertices); for(int x = 0; x < numVertices; x++) { strongNeighbor_cpu[x] = -1; } for(int x = 0; x < numEdges; x++) { int src = graph.src[x]; int dst = graph.dst[x]; int wgt = graph.weight[x]; //std::cerr << src << "->" << dst << ": " << wgt << "\n"; if(strongNeighbor_cpu[src] == -1 || strongNeighborWeight_cpu[src] < wgt) { strongNeighbor_cpu[src] = dst; strongNeighborWeight_cpu[src] = wgt; } } //move data from CPU to GPU, and free the CPU arrays cudaMemcpy(strongNeighbor_gpu, strongNeighbor_cpu, numVertices * sizeof(int), cudaMemcpyHostToDevice); free(strongNeighbor_cpu); free(strongNeighborWeight_cpu); } //Step 2: check for each vertex whether there's a handshake check_handshaking_gpu<<<num_thread_blocks, 
threadsPerBlock>>>(strongNeighbor_gpu, matches_gpu, numVertices); // DEBUG /* printf("\nmatches"); int * temp7 = (int *) malloc(sizeof(int) * (numVertices)); cudaMemcpy(temp7, matches_gpu, sizeof(int) * (numVertices), cudaMemcpyDeviceToHost); for (int i = 0; i < numVertices; i++) { printf(" %d ", temp7[i]); } */ //Step 3: filter //Step 3a: decide which edges to keep (marked with a 1) versus filter (marked with a 0) int * keepEdges_gpu = temp1_gpu; temp1_gpu = NULL; markFilterEdges_gpu<<<num_thread_blocks, threadsPerBlock>>>(src_gpu, dst_gpu, matches_gpu, keepEdges_gpu, numEdges); // DEBUG /* printf("\nkeepEdges"); int * temp6 = (int *) malloc(sizeof(int) * (numEdges + 1)); cudaMemcpy(temp6, keepEdges_gpu, sizeof(int) * (numEdges + 1), cudaMemcpyDeviceToHost); for (int i = 0; i < numEdges + 1; i++) { printf(" %d ", temp6[i]); } */ //Step 3b: get new indices (in edge list for next iteration) of the edges we're going to keep int * newEdgeLocs_gpu = keepEdges_gpu; keepEdges_gpu = NULL; for(int distance = 0; distance <= numEdges; distance = max(1, distance * 2)) { exclusive_prefix_sum_gpu<<<num_thread_blocks, threadsPerBlock>>>(newEdgeLocs_gpu, temp2_gpu, distance, numEdges+1); swapArray((void**) &newEdgeLocs_gpu, (void**) &temp2_gpu); } //note: temp1 is still in use, until we're done with newEdgeLocs_gpu // DEBUG /* printf("\nnewEdgeLocs"); int * temp5 = (int *) malloc(sizeof(int) * (numEdges + 1)); cudaMemcpy(temp5, newEdgeLocs_gpu, sizeof(int) * (numEdges + 1), cudaMemcpyDeviceToHost); for (int i = 0; i < numEdges + 1; i++) { printf(" %d ", temp5[i]); } */ //Step 3c: check if we're done matching int lastLoc = 0; cudaMemcpy(&lastLoc, &(newEdgeLocs_gpu[numEdges]), sizeof(int), cudaMemcpyDeviceToHost); if(lastLoc < 2) { //termination: fewer than two nodes remain unmatched break; } else if(lastLoc == numEdges) { //termination: no additional matches are possible break; } //Step 3d: pack the src, dst, and weight arrays in accordance with new edge locations packGraph_gpu<<<num_thread_blocks, threadsPerBlock>>>(temp2_gpu, src_gpu, temp3_gpu, dst_gpu, temp4_gpu, weight_gpu, newEdgeLocs_gpu, numEdges); swapArray((void**) &temp2_gpu, (void**) &src_gpu); swapArray((void**) &temp3_gpu, (void**) &dst_gpu); swapArray((void**) &temp4_gpu, (void**) &weight_gpu); temp1_gpu = newEdgeLocs_gpu; newEdgeLocs_gpu = NULL; //note: now we're done with the current contents of all the temporary arrays //Set new number of edges: numEdges = lastLoc; if(iter > numVertices) { cerr << "Error: matching has been running too long; breaking loop now\n"; break; } if(!extra_credit) { //Step 4: Copy new graph arrays to CPU cudaMemcpy(graph.src, src_gpu, numEdges * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(graph.dst, dst_gpu, numEdges * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(graph.weight, weight_gpu, numEdges * sizeof(int), cudaMemcpyDeviceToHost); } } cudaMemcpy(matches, matches_gpu, numVertices * sizeof(int), cudaMemcpyDeviceToHost); //Wait until pending GPU operations are complete: cudaDeviceSynchronize(); //free GPU arrays /** YOUR CODE GOES BELOW **/ // Free strongNeighbor_gpu cudaFree(strongNeighbor_gpu); // Free matches_gpu cudaFree(matches_gpu); // Free src_gpu cudaFree(src_gpu); // Free dst_gpu cudaFree(dst_gpu); // Free weight_gpu cudaFree(weight_gpu); // Free temp1, temp2, temp3, temp4 cudaFree(temp1_gpu); cudaFree(temp2_gpu); cudaFree(temp3_gpu); cudaFree(temp4_gpu); /** YOUR CODE GOES ABOVE **/ cudaError_t cudaError; cudaError = cudaGetLastError(); if(cudaError != cudaSuccess) { cerr << "Warning: one or 
more CUDA errors occurred. Try using cuda-gdb to debug. Error message: \n\t" << cudaGetErrorString(cudaError) << "\n";
    }

    return iter + 1;
}

void one_way_handshake_wrapper(GraphData graph, int *& matches, int numthreads, bool extra_credit)
{
    fprintf(stderr, "Start One Way Matching ... \n");

    struct timeval beginTime, endTime;
    setTime(&beginTime);

    int iter = one_way_handshake(graph, matches, numthreads, extra_credit);

    setTime(&endTime);

    fprintf(stderr, "Done matching.\n");
    fprintf(stderr, "Performed matching for %d iterations\n", iter);
    fprintf(stderr, "One Way Handshaking Matching Time: %.2f ms\n", getTime(&beginTime, &endTime));
}
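The host loop in this pair drives exclusive_prefix_sum_gpu with a doubling distance argument and double-buffered arrays (via swapArray), which is the shape of a Hillis-Steele scan. The actual kernel lives in gpuHeaders.cuh and is not part of this record; the single pass below is only a plausible sketch consistent with that call pattern, assumes the launch covers all n elements, and uses the made-up name scan_pass.

// Hypothetical single pass of a distance-doubling (Hillis-Steele style) scan,
// matching the swapArray/doubling-distance loop in one_way_handshake above.
// Run once with distance = 0 to shift the input (inclusive -> exclusive),
// then with distance = 1, 2, 4, ... until distance >= n, swapping in/out
// between passes.
__global__ void scan_pass(const int *in, int *out, int distance, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    if (distance == 0)
        out[i] = (i == 0) ? 0 : in[i - 1];                              // shift step
    else
        out[i] = (i >= distance) ? in[i] + in[i - distance] : in[i];    // partial-sum step
}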
95243747138c037415778d71d12900ced5531b66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // CUDA implementation of FIM (Fast Iterative Method) for Eikonal equations // // Copyright (c) Won-Ki Jeong ([email protected]) // // 2016. 2. 4 // #include "kernel.h" __device__ DOUBLE get_time_eikonal(DOUBLE a, DOUBLE b, DOUBLE c, DOUBLE s) { DOUBLE ret, tmp; // a > b > c if(a < b) { tmp = a; a = b; b = tmp; } if(b < c) { tmp = b; b = c; c = tmp; } if(a < b) { tmp = a; a = b; b = tmp; } ret = INF; if(c < INF) { ret = c + s; if(ret > b) { tmp = ((b+c) + sqrtf(2.0f*s*s-(b-c)*(b-c)))*0.5f; if(tmp > b) ret = tmp; if(ret > a) { tmp = (a+b+c)/3.0f + sqrtf(2.0f*(a*(b-a)+b*(c-b)+c*(a-c))+3.0f*s*s)/3.0f; if(tmp > a) ret = tmp; } } } return ret; } __global__ void run_solver( const double*__restrict__ spd, const bool*__restrict__ mask, const DOUBLE *__restrict__ sol_in, DOUBLE *__restrict__ sol_out, bool *__restrict__ con, const uint*__restrict__ list, int xdim, int ydim, int zdim, int nIter, uint nActiveBlock) { uint list_idx = blockIdx.y*gridDim.x + blockIdx.x; if(list_idx < nActiveBlock) { // retrieve actual block index from the active list uint block_idx = list[list_idx]; double F; bool isValid; uint blocksize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH; uint base_addr = block_idx*blocksize; uint xgridlength = xdim/BLOCK_LENGTH; uint ygridlength = ydim/BLOCK_LENGTH; uint zgridlength = zdim/BLOCK_LENGTH; // compute block index uint bx = block_idx%xgridlength; uint tmpIdx = (block_idx - bx)/xgridlength; uint by = tmpIdx%ygridlength; uint bz = (tmpIdx-by)/ygridlength; uint tx = threadIdx.x; uint ty = threadIdx.y; uint tz = threadIdx.z; uint tIdx = tz*BLOCK_LENGTH*BLOCK_LENGTH + ty*BLOCK_LENGTH + tx; __shared__ DOUBLE _sol[BLOCK_LENGTH+2][BLOCK_LENGTH+2][BLOCK_LENGTH+2]; // copy global to shared memory dim3 idx(tx+1,ty+1,tz+1); SOL(idx.x,idx.y,idx.z) = sol_in[base_addr + tIdx]; F = spd[base_addr + tIdx]; if(F > 0) F = 1.0/F; // F = 1/f isValid = mask[base_addr + tIdx]; uint new_base_addr, new_tIdx; // 1-neighborhood values if(tx == 0) { if(bx == 0) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + BLOCK_LENGTH-1; new_base_addr = (block_idx - 1)*blocksize; } SOL(tx,idx.y,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(tx == BLOCK_LENGTH-1) { if(bx == xgridlength-1) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1); new_base_addr = (block_idx + 1)*blocksize; } SOL(tx+2,idx.y,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(ty == 0) { if(by == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength)*blocksize; } SOL(idx.x,ty,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(ty == BLOCK_LENGTH-1) { if(by == ygridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength)*blocksize; } SOL(idx.x,ty+2,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(tz == 0) { if(bz == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength*ygridlength)*blocksize; } SOL(idx.x,idx.y,tz) = sol_in[new_base_addr + new_tIdx]; } if(tz == BLOCK_LENGTH-1) { if(bz == zgridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength*ygridlength)*blocksize; 
} SOL(idx.x,idx.y,tz+2) = sol_in[new_base_addr + new_tIdx]; } __syncthreads(); DOUBLE a,b,c,oldT,newT; for(int iter=0; iter<nIter; iter++) { // // compute new value // oldT = newT = SOL(idx.x,idx.y,idx.z); if(isValid) { a = min(SOL(tx,idx.y,idx.z),SOL(tx+2,idx.y,idx.z)); b = min(SOL(idx.x,ty,idx.z),SOL(idx.x,ty+2,idx.z)); c = min(SOL(idx.x,idx.y,tz),SOL(idx.x,idx.y,tz+2)); DOUBLE tmp = (DOUBLE) get_time_eikonal(a, b, c, F); newT = min(tmp,oldT); } __syncthreads(); if(isValid) SOL(idx.x,idx.y,idx.z) = newT; __syncthreads(); // this may not required } DOUBLE residue = oldT - newT; // write back to global memory con[base_addr + tIdx] = (residue < EPS) ? true : false; sol_out[base_addr + tIdx] = newT; } } __global__ void run_reduction( const bool *__restrict__ con, bool *__restrict__ listVol, const uint *__restrict__ list, uint nActiveBlock) { uint list_idx = blockIdx.y*gridDim.x + blockIdx.x; if(list_idx < nActiveBlock) { uint block_idx = list[list_idx]; __shared__ bool conv[BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH]; uint blocksize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH/2; uint base_addr = block_idx*blocksize*2; uint tx = threadIdx.x; uint ty = threadIdx.y; uint tz = threadIdx.z; uint tIdx = tz*BLOCK_LENGTH*BLOCK_LENGTH + ty*BLOCK_LENGTH + tx; conv[tIdx] = con[base_addr + tIdx]; conv[tIdx + blocksize] = con[base_addr + tIdx + blocksize]; __syncthreads(); for(uint i=blocksize; i>0; i/=2) { if(tIdx < i) { bool b1, b2; b1 = conv[tIdx]; b2 = conv[tIdx+i]; conv[tIdx] = (b1 && b2) ? true : false ; } __syncthreads(); } if(tIdx == 0) { listVol[block_idx] = !conv[0]; // active list is negation of tile convergence (active = not converged) } } } __global__ void run_check_neighbor( const double*__restrict__ spd, const bool*__restrict__ mask, const DOUBLE *__restrict__ sol_in, DOUBLE *__restrict__ sol_out, bool *__restrict__ con, const uint*__restrict__ list, int xdim, int ydim, int zdim, uint nActiveBlock, uint nTotalBlock) { uint list_idx = blockIdx.y*gridDim.x + blockIdx.x; if(list_idx < nTotalBlock) { double F; bool isValid; __shared__ DOUBLE _sol[BLOCK_LENGTH+2][BLOCK_LENGTH+2][BLOCK_LENGTH+2]; uint block_idx = list[list_idx]; uint blocksize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH; uint base_addr = block_idx*blocksize; uint tx = threadIdx.x; uint ty = threadIdx.y; uint tz = threadIdx.z; uint tIdx = tz*BLOCK_LENGTH*BLOCK_LENGTH + ty*BLOCK_LENGTH + tx; if(list_idx < nActiveBlock) // copy value { sol_out[base_addr + tIdx] = sol_in[base_addr + tIdx]; } else { uint xgridlength = xdim/BLOCK_LENGTH; uint ygridlength = ydim/BLOCK_LENGTH; uint zgridlength = zdim/BLOCK_LENGTH; // compute block index uint bx = block_idx%xgridlength; uint tmpIdx = (block_idx - bx)/xgridlength; uint by = tmpIdx%ygridlength; uint bz = (tmpIdx-by)/ygridlength; // copy global to shared memory dim3 idx(tx+1,ty+1,tz+1); _sol[idx.x][idx.y][idx.z] = sol_in[base_addr + tIdx]; F = spd[base_addr + tIdx]; if(F > 0) F = 1.0/F; isValid = mask[base_addr + tIdx]; uint new_base_addr, new_tIdx; // 1-neighborhood values if(tx == 0) { if(bx == 0) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + BLOCK_LENGTH-1; new_base_addr = (block_idx - 1)*blocksize; } _sol[tx][idx.y][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(tx == BLOCK_LENGTH-1) { if(bx == xgridlength-1) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1); new_base_addr = (block_idx + 1)*blocksize; } _sol[tx+2][idx.y][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(ty == 0) { if(by == 0) { 
new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength)*blocksize; } _sol[idx.x][ty][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(ty == BLOCK_LENGTH-1) { if(by == ygridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength)*blocksize; } _sol[idx.x][ty+2][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(tz == 0) { if(bz == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength*ygridlength)*blocksize; } _sol[idx.x][idx.y][tz] = sol_in[new_base_addr + new_tIdx]; } if(tz == BLOCK_LENGTH-1) { if(bz == zgridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength*ygridlength)*blocksize; } _sol[idx.x][idx.y][tz+2] = sol_in[new_base_addr + new_tIdx]; } __syncthreads(); DOUBLE a,b,c,oldT,newT; // // compute new value // oldT = newT = _sol[idx.x][idx.y][idx.z]; if(isValid) { a = min(_sol[tx][idx.y][idx.z],_sol[tx+2][idx.y][idx.z]); b = min(_sol[idx.x][ty][idx.z],_sol[idx.x][ty+2][idx.z]); c = min(_sol[idx.x][idx.y][tz],_sol[idx.x][idx.y][tz+2]); DOUBLE tmp = (DOUBLE) get_time_eikonal(a, b, c, F); newT = min(tmp,oldT); sol_out[base_addr + tIdx] = newT; } // write back to global memory DOUBLE residue = oldT - newT; con[base_addr + tIdx] = (residue < EPS) ? true : false; } } }
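run_reduction above collapses the per-voxel convergence flags of one tile into a single active-list entry with a shared-memory tree reduction; a tile stays active exactly when some voxel has not converged. The simplified kernel below restates that pattern with a 1-D thread block and one flag per thread (the original uses a 3-D block and loads two flags per thread); the name tile_converged and the template parameter are assumptions for the sketch, and N must be a power of two for this simple tree, the same assumption the original's halving loop makes about BLOCK_LENGTH^3.

// Simplified AND-reduction over one tile's convergence flags.
// Launch with one block per tile and blockDim.x == N, e.g.
//   tile_converged<64><<<nTiles, 64>>>(d_con, d_listVol, nTiles);
template <int N>
__global__ void tile_converged(const bool *con, bool *listVol, int nTiles)
{
    __shared__ bool flags[N];

    int tile = blockIdx.x;
    if (tile >= nTiles) return;

    int tid = threadIdx.x;
    flags[tid] = con[tile * N + tid];
    __syncthreads();

    for (int s = N / 2; s > 0; s >>= 1) {
        if (tid < s) flags[tid] = flags[tid] && flags[tid + s];
        __syncthreads();
    }

    // A tile remains on the active list while it has NOT fully converged.
    if (tid == 0) listVol[tile] = !flags[0];
}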
95243747138c037415778d71d12900ced5531b66.cu
// // CUDA implementation of FIM (Fast Iterative Method) for Eikonal equations // // Copyright (c) Won-Ki Jeong ([email protected]) // // 2016. 2. 4 // #include "kernel.h" __device__ DOUBLE get_time_eikonal(DOUBLE a, DOUBLE b, DOUBLE c, DOUBLE s) { DOUBLE ret, tmp; // a > b > c if(a < b) { tmp = a; a = b; b = tmp; } if(b < c) { tmp = b; b = c; c = tmp; } if(a < b) { tmp = a; a = b; b = tmp; } ret = INF; if(c < INF) { ret = c + s; if(ret > b) { tmp = ((b+c) + sqrtf(2.0f*s*s-(b-c)*(b-c)))*0.5f; if(tmp > b) ret = tmp; if(ret > a) { tmp = (a+b+c)/3.0f + sqrtf(2.0f*(a*(b-a)+b*(c-b)+c*(a-c))+3.0f*s*s)/3.0f; if(tmp > a) ret = tmp; } } } return ret; } __global__ void run_solver( const double*__restrict__ spd, const bool*__restrict__ mask, const DOUBLE *__restrict__ sol_in, DOUBLE *__restrict__ sol_out, bool *__restrict__ con, const uint*__restrict__ list, int xdim, int ydim, int zdim, int nIter, uint nActiveBlock) { uint list_idx = blockIdx.y*gridDim.x + blockIdx.x; if(list_idx < nActiveBlock) { // retrieve actual block index from the active list uint block_idx = list[list_idx]; double F; bool isValid; uint blocksize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH; uint base_addr = block_idx*blocksize; uint xgridlength = xdim/BLOCK_LENGTH; uint ygridlength = ydim/BLOCK_LENGTH; uint zgridlength = zdim/BLOCK_LENGTH; // compute block index uint bx = block_idx%xgridlength; uint tmpIdx = (block_idx - bx)/xgridlength; uint by = tmpIdx%ygridlength; uint bz = (tmpIdx-by)/ygridlength; uint tx = threadIdx.x; uint ty = threadIdx.y; uint tz = threadIdx.z; uint tIdx = tz*BLOCK_LENGTH*BLOCK_LENGTH + ty*BLOCK_LENGTH + tx; __shared__ DOUBLE _sol[BLOCK_LENGTH+2][BLOCK_LENGTH+2][BLOCK_LENGTH+2]; // copy global to shared memory dim3 idx(tx+1,ty+1,tz+1); SOL(idx.x,idx.y,idx.z) = sol_in[base_addr + tIdx]; F = spd[base_addr + tIdx]; if(F > 0) F = 1.0/F; // F = 1/f isValid = mask[base_addr + tIdx]; uint new_base_addr, new_tIdx; // 1-neighborhood values if(tx == 0) { if(bx == 0) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + BLOCK_LENGTH-1; new_base_addr = (block_idx - 1)*blocksize; } SOL(tx,idx.y,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(tx == BLOCK_LENGTH-1) { if(bx == xgridlength-1) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1); new_base_addr = (block_idx + 1)*blocksize; } SOL(tx+2,idx.y,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(ty == 0) { if(by == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength)*blocksize; } SOL(idx.x,ty,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(ty == BLOCK_LENGTH-1) { if(by == ygridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength)*blocksize; } SOL(idx.x,ty+2,idx.z) = sol_in[new_base_addr + new_tIdx]; } if(tz == 0) { if(bz == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength*ygridlength)*blocksize; } SOL(idx.x,idx.y,tz) = sol_in[new_base_addr + new_tIdx]; } if(tz == BLOCK_LENGTH-1) { if(bz == zgridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength*ygridlength)*blocksize; } SOL(idx.x,idx.y,tz+2) = sol_in[new_base_addr + new_tIdx]; } __syncthreads(); DOUBLE 
a,b,c,oldT,newT; for(int iter=0; iter<nIter; iter++) { // // compute new value // oldT = newT = SOL(idx.x,idx.y,idx.z); if(isValid) { a = min(SOL(tx,idx.y,idx.z),SOL(tx+2,idx.y,idx.z)); b = min(SOL(idx.x,ty,idx.z),SOL(idx.x,ty+2,idx.z)); c = min(SOL(idx.x,idx.y,tz),SOL(idx.x,idx.y,tz+2)); DOUBLE tmp = (DOUBLE) get_time_eikonal(a, b, c, F); newT = min(tmp,oldT); } __syncthreads(); if(isValid) SOL(idx.x,idx.y,idx.z) = newT; __syncthreads(); // this may not required } DOUBLE residue = oldT - newT; // write back to global memory con[base_addr + tIdx] = (residue < EPS) ? true : false; sol_out[base_addr + tIdx] = newT; } } __global__ void run_reduction( const bool *__restrict__ con, bool *__restrict__ listVol, const uint *__restrict__ list, uint nActiveBlock) { uint list_idx = blockIdx.y*gridDim.x + blockIdx.x; if(list_idx < nActiveBlock) { uint block_idx = list[list_idx]; __shared__ bool conv[BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH]; uint blocksize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH/2; uint base_addr = block_idx*blocksize*2; uint tx = threadIdx.x; uint ty = threadIdx.y; uint tz = threadIdx.z; uint tIdx = tz*BLOCK_LENGTH*BLOCK_LENGTH + ty*BLOCK_LENGTH + tx; conv[tIdx] = con[base_addr + tIdx]; conv[tIdx + blocksize] = con[base_addr + tIdx + blocksize]; __syncthreads(); for(uint i=blocksize; i>0; i/=2) { if(tIdx < i) { bool b1, b2; b1 = conv[tIdx]; b2 = conv[tIdx+i]; conv[tIdx] = (b1 && b2) ? true : false ; } __syncthreads(); } if(tIdx == 0) { listVol[block_idx] = !conv[0]; // active list is negation of tile convergence (active = not converged) } } } __global__ void run_check_neighbor( const double*__restrict__ spd, const bool*__restrict__ mask, const DOUBLE *__restrict__ sol_in, DOUBLE *__restrict__ sol_out, bool *__restrict__ con, const uint*__restrict__ list, int xdim, int ydim, int zdim, uint nActiveBlock, uint nTotalBlock) { uint list_idx = blockIdx.y*gridDim.x + blockIdx.x; if(list_idx < nTotalBlock) { double F; bool isValid; __shared__ DOUBLE _sol[BLOCK_LENGTH+2][BLOCK_LENGTH+2][BLOCK_LENGTH+2]; uint block_idx = list[list_idx]; uint blocksize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH; uint base_addr = block_idx*blocksize; uint tx = threadIdx.x; uint ty = threadIdx.y; uint tz = threadIdx.z; uint tIdx = tz*BLOCK_LENGTH*BLOCK_LENGTH + ty*BLOCK_LENGTH + tx; if(list_idx < nActiveBlock) // copy value { sol_out[base_addr + tIdx] = sol_in[base_addr + tIdx]; } else { uint xgridlength = xdim/BLOCK_LENGTH; uint ygridlength = ydim/BLOCK_LENGTH; uint zgridlength = zdim/BLOCK_LENGTH; // compute block index uint bx = block_idx%xgridlength; uint tmpIdx = (block_idx - bx)/xgridlength; uint by = tmpIdx%ygridlength; uint bz = (tmpIdx-by)/ygridlength; // copy global to shared memory dim3 idx(tx+1,ty+1,tz+1); _sol[idx.x][idx.y][idx.z] = sol_in[base_addr + tIdx]; F = spd[base_addr + tIdx]; if(F > 0) F = 1.0/F; isValid = mask[base_addr + tIdx]; uint new_base_addr, new_tIdx; // 1-neighborhood values if(tx == 0) { if(bx == 0) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + BLOCK_LENGTH-1; new_base_addr = (block_idx - 1)*blocksize; } _sol[tx][idx.y][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(tx == BLOCK_LENGTH-1) { if(bx == xgridlength-1) // end of the grid { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1); new_base_addr = (block_idx + 1)*blocksize; } _sol[tx+2][idx.y][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(ty == 0) { if(by == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + 
(BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength)*blocksize; } _sol[idx.x][ty][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(ty == BLOCK_LENGTH-1) { if(by == ygridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength)*blocksize; } _sol[idx.x][ty+2][idx.z] = sol_in[new_base_addr + new_tIdx]; } if(tz == 0) { if(bz == 0) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx + (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx - xgridlength*ygridlength)*blocksize; } _sol[idx.x][idx.y][tz] = sol_in[new_base_addr + new_tIdx]; } if(tz == BLOCK_LENGTH-1) { if(bz == zgridlength-1) { new_tIdx = tIdx; new_base_addr = base_addr; } else { new_tIdx = tIdx - (BLOCK_LENGTH-1)*BLOCK_LENGTH*BLOCK_LENGTH; new_base_addr = (block_idx + xgridlength*ygridlength)*blocksize; } _sol[idx.x][idx.y][tz+2] = sol_in[new_base_addr + new_tIdx]; } __syncthreads(); DOUBLE a,b,c,oldT,newT; // // compute new value // oldT = newT = _sol[idx.x][idx.y][idx.z]; if(isValid) { a = min(_sol[tx][idx.y][idx.z],_sol[tx+2][idx.y][idx.z]); b = min(_sol[idx.x][ty][idx.z],_sol[idx.x][ty+2][idx.z]); c = min(_sol[idx.x][idx.y][tz],_sol[idx.x][idx.y][tz+2]); DOUBLE tmp = (DOUBLE) get_time_eikonal(a, b, c, F); newT = min(tmp,oldT); sol_out[base_addr + tIdx] = newT; } // write back to global memory DOUBLE residue = oldT - newT; con[base_addr + tIdx] = (residue < EPS) ? true : false; } } }
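run_solver indexes the active list as blockIdx.y * gridDim.x + blockIdx.x and uses one BLOCK_LENGTH^3 thread block per tile, so the host side is expected to launch a 2-D grid over the active tiles. That driver is not included in this record; the launcher below is an inferred sketch (launch_run_solver and the 65535-per-dimension split are assumptions), reusing BLOCK_LENGTH, DOUBLE and uint from kernel.h.

// Hypothetical host-side launch for run_solver, inferred from its indexing:
// one BLOCK_LENGTH^3 thread block per active tile, tiles spread over a 2-D
// grid so that more than 65535 tiles can be handled.
void launch_run_solver(const double *d_spd, const bool *d_mask,
                       const DOUBLE *d_sol_in, DOUBLE *d_sol_out, bool *d_con,
                       const uint *d_list, int xdim, int ydim, int zdim,
                       int nIter, uint nActiveBlock)
{
    if (nActiveBlock == 0) return;

    unsigned int gx = (nActiveBlock < 65535u) ? nActiveBlock : 65535u;
    unsigned int gy = (nActiveBlock + gx - 1) / gx;

    dim3 grid(gx, gy);
    dim3 block(BLOCK_LENGTH, BLOCK_LENGTH, BLOCK_LENGTH);

    run_solver<<<grid, block>>>(d_spd, d_mask, d_sol_in, d_sol_out, d_con,
                                d_list, xdim, ydim, zdim, nIter, nActiveBlock);
}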
09a2652d8a4c10b7319c776c520747d64d50de40.hip
// !!! This is a file automatically generated by hipify!!! #include<time.h> #include<stdlib.h> #include<stdio.h> #include<math.h> #include<GL/glut.h> #include <hip/hip_runtime.h> //#include "timer.h" #define pi 22/7; typedef struct { double x; double y; } points; typedef struct { double x; double y; double largest; } kernel_shared_type; //points pts[100]; //test case 1 points pts[] = { 3.0, 3.0, 4.0, 6.0, 1.0, 2.0, 5.0, 4.0, 7.0, 3.0, 6.0, 6.0, 1.0, 1.0, 2.0, 1.0, 6.0, 7.0, 8.0, 8.0, 5.0, 5.0, 9.0, 4.0, 7.0, 5.0, 1.0, 10.0, 2.0, 7.0, 5.0, 2.0, 2.0, 4.0, 8.0, 6.0, 3.0, 6.0, 4.0, 4.0, 6.0, 8.0, 5.0, 10.0, 1.0, 5.0, 4.0, 3.0, 8.0, 7.0, -2.0, 20.0, 9.0, 0.0, -29.0, -17.0, 17.0, 29.0, 2.0, 3.0, 16.0, 7.0, 19.0, 7.0, 8.0, 9.0, 19.0, 25.0, 23.0, 4.0, 17.0, 27.0, 18.0, 26.0, 6.0, 4.0, 12.0, 28.0, 5.0, 25.0, 2.0, 6.0, 28.0, 28.0, 20.0, 15.0, 16.0, 18.0, 8.0, 12.0, 7.0, 15.0, 3.0, 2.0, 9.0, 1.0, 14.0, 5.0, 4.0, 17.0, 2.0, 18.0, 7.0, 7.0, 14.0, 9.0, -19.0, -23.0, 12.0, -27.0, -15.0, 12, -29, -29, -20, 16, 25.0, 20.0, 14.0, 9.0, 1.0, 3.0, 9.0, 22.0, 6.0, 2.0, 7.0, 14.0, 19.0, 1.0, 17.0, 14.0, 8.0, 15.0, 25.0, 2.0, 18.0, 1.0, 3.0, 18.0 }; //test case 2 //points pts[]={-2.0,3.0,-15.0,-2.0,20.0,1.50,6.0,1.0,14.0,0.0,4.0,-2.30,-8.0,1.0,14.0,3.0,-24.0,-3.0,18.0,-2.0,-5.0,3.0,-36.0,23.0,7.0,6.0,16.0,21.0,32.0,15.0,42.0,19.0,22.0,33.0,25.0,35.0,39.0,29.0,28.0,49.0,43.0,17.0,28.0,20.0,37.0,27.0,23.0,34.0,32.0,13.0,36.0,16.0,24.0,35.0,39.0,29.0,15.0,26.0,43.0,34.0,12.0,32.0,31.0,21.0,35.0}; //horizontal points //test case 3 //points pts[]={2.0,27.0,1.0,-29.0,0.25,-15.62,-0.0,16.45,-1.25,6.0,-1.5,16.0,-1.25,18.0,-1.0,18.75,-2.0,15.0,-1.60,18.0,2.0,-29.0,1.0,-23.0,-2.0,-17.0,-2.0,24.0};//vertical lines //test case 4 //points pts[]={1.0,7.0,23.0,46.0,27.0,16.0,37.0,18.0,9.0,2.0,35.0,18.0,27.0,21.0,7.0,2.0,23.0,32.0,6.0,3.0,25.0,6.0,28.0,17.0,5.0,38.0,48.0,43.0,28.0,27.0,34.0,47.0,39.0,40.0,35.0,8.0,34.0,32.0,3.0,4.0,3.0,5.0,16.0,9.0,35.0,24.0,1.0,24.0,46.0,29.0,43.0,15.0,32.0,5.0,17.0,16.0,26.0,48.0,29.0,30.0,36.0,24.0,4.0,3.0,12.0,12.0,5.0,8.0,27.0,46.0}; points pt1, pt2, pt11, pt22, smallest, largest; int i, j, k, r = 0, l2 = 0, count = 0, f_r = 0, screen1 = 0, screen2 = 0, n, input = 0; //int n; double m, c, temp_c, xx, yy; //m1,c1,m2,c2,temp_c,temp_c1,temp_c2,temp_c3,c_m2,h_m1,h_m2; points subset1[100], subset2[100], result[100]; //,subsubset1[100],subsubset2[100],result[100];; /*void getRandomPoints(int n,int xmax,int ymax) { for (i = 0; i < n; i++) { xx=rand()%ymax; yy=rand()%xmax; pts[i].x=xx; pts[i].y=yy; } //points.push( [ xMax /4 + r * Math.cos(theta), yMax/2 + 2 * r * Math.sin(theta) ] ) } /* var phase = Math.random() * Math.PI * 2; for (var i = 0; i < numPoint/2; i++) { var r = Math.random()*xMax/4; var theta = Math.random() * 1.5 * Math.PI + phase; points.push( [ xMax /4 * 3 + r * Math.cos(theta), yMax/2 + r * Math.sin(theta) ] ) }*/ /*void show(points pts[],int n) { int i; printf("N:%d \n",n);*/ void draw(points a, points c) { glColor3f(0.0, 0.0, 0.0); for (i = 0; i < n; i++) { glBegin(GL_POINTS); glVertex2d(pts[i].x, pts[i].y); glEnd(); } glBegin(GL_LINES); { glVertex2d(a.x, a.y); glVertex2d(c.x, c.y); } glEnd(); } points minimum(points *pts, int n) { points smallest_x = pts[0]; //pt with smallest x // points smallest_y=pts[0]; //pt wit smallest y points smallest; printf("The Points are: \n"); for (i = 0; i < n; i++) printf("(%.2f,%.2f)\n", pts[i]); for (i = 0; i < n; i++) { if (pts[i].x < smallest_x.x) smallest_x = pts[i]; else if (pts[i].x == smallest_x.x) { if (pts[i].y > smallest_x.y) 
smallest_x = pts[i]; } //if(pts[i].y < smallest_y.y) //smallest_y=pts[i]; } printf("(%.2f,%.2f) is smallest x \n", smallest_x); //printf("(%.2f,%.2f) is smallest y \n",smallest_y); //if((smallest_x.x < smallest_y.x)&&(smallest_x.y < smallest_y.y)) //compares the smallest in x & y and finalize the smallest smallest = smallest_x; //else //smallest=smallest_y; //show(pts,n); printf("(%.2f,%.2f) is smallest \n", smallest); result[f_r++] = smallest; return smallest; } points maximum(points *pts, int n, points smallest) { double distance, largest_distance = 0.0; //finding the largest distance pt double dx, dy; points largest; for (i = 0; i < n; i++) { dx = smallest.x - pts[i].x; //difference in x dy = smallest.y - pts[i].y; //difference in y distance = sqrt((dx * dx) + (dy * dy)); //finding the distance using distance formula if (largest_distance < distance) { largest_distance = distance; largest.x = pts[i].x; largest.y = pts[i].y; } } printf("%.2f is largest distance\n", largest_distance); printf("(%.2f,%.2f) is largest point\n", largest); result[f_r++] = largest; return largest; } __global__ void subhtKernel(double *d_ab, points *pt, points *a, points *b, points *c, int *limit, double *largest) { extern __shared__ kernel_shared_type sdata[]; unsigned int tid = threadIdx.x; if (tid < *limit) { double dx, dy, t_ac, t_bc, s, area; double h; dx = a->x - c[tid].x; dy = a->y - c[tid].y; t_ac = sqrt((dx * dx) + (dy * dy)); dx = b->x - c[tid].x; dy = b->y - c[tid].y; t_bc = sqrt((dx * dx) + (dy * dy)); s = (*d_ab + t_ac + t_bc) / 2; area = sqrt(s * (s - *d_ab) * (s - t_ac) * (s - t_bc)); h = (area * 2) / *d_ab; sdata[tid].largest = h; sdata[tid].x = c[tid].x; sdata[tid].y = c[tid].y; __syncthreads(); for (unsigned int s = *limit / 2; s >= 1; s = s / 2) { if (tid < s) { if (sdata[tid].largest < sdata[tid + s].largest) { sdata[tid].largest = sdata[tid + s].largest; sdata[tid].x = sdata[tid + s].x; sdata[tid].y = sdata[tid + s].y; } } __syncthreads(); } __syncthreads(); if (tid == 0) { *largest = sdata[0].largest; pt->x = sdata[0].x; pt->y = sdata[0].y; } } } points subht(points *c, int j, points a, points b) { double dx, dy, d_ab, t_ac, t_bc, area, s, largest_h; points pt; dx = a.x - b.x; //difference in x dy = a.y - b.y; //difference in y d_ab = sqrt((dx * dx) + (dy * dy)); //finding the distance using distance formula double *d_d_ab, *d_largest_h; int *d_j; points *d_pt, *d_a, *d_b, *d_c; hipMalloc((void**) &d_d_ab, sizeof(double)); hipMalloc((void**) &d_largest_h, sizeof(double)); hipMalloc((void**) &d_j, sizeof(int)); hipMalloc((void**) &d_pt, sizeof(points)); hipMalloc((void**) &d_a, sizeof(points)); hipMalloc((void**) &d_b, sizeof(points)); hipMalloc((void**) &d_c, sizeof(points) * 100); hipMemcpy(d_d_ab, &d_ab, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_j, &j, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_a, &a, sizeof(points), hipMemcpyHostToDevice); hipMemcpy(d_b, &b, sizeof(points), hipMemcpyHostToDevice); hipMemcpy(d_c, c, sizeof(points) * 100, hipMemcpyHostToDevice); unsigned int shared_size = sizeof(points) * j * j; hipLaunchKernelGGL(( subhtKernel), dim3(1), dim3(j), shared_size, 0, d_d_ab, d_pt, d_a, d_b, d_c, d_j, d_largest_h); hipDeviceSynchronize(); hipMemcpy(&largest_h, d_largest_h, sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(&pt, d_pt, sizeof(points), hipMemcpyDeviceToHost); hipFree(d_d_ab); hipFree(d_largest_h); hipFree(d_j); hipFree(d_pt); hipFree(d_a); hipFree(d_b); hipFree(d_c); printf("The maximum height is %.2f and point is (%.2f,%.2f)\n", largest_h, pt); 
result[f_r++] = pt; return pt; } void division(points *pts, int n, points smallest, points largest) { m = (double) (largest.y - smallest.y) / (largest.x - smallest.x); //slope of the line c = (double) smallest.y - (m * smallest.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m, c); printf("(%.2f,%.2f) (%.2f,%.2f)\n", smallest, largest); j = 0; k = 0; #pragma acc parallel loop for (i = 0; i < n; i++) { temp_c = pts[i].y - (m * pts[i].x); //y=mx+c if (c - temp_c < 0) //points above the line { subset1[j].x = pts[i].x; subset1[j].y = pts[i].y; j++; } else if (c - temp_c > 0) //points below the line { subset2[k].x = pts[i].x; subset2[k].y = pts[i].y; k++; } } printf("Subset1:\n"); for (i = 0; i < j; i++) printf("(%.2f,%.2f)\n", subset1[i]); printf("j old:%d\n\n", j); printf("k old:%d\n\n", k); printf("Subset2:\n"); for (i = 0; i < k; i++) printf("(%.2f,%.2f)\n", subset2[i]); pt1 = subht(subset1, j, smallest, largest); pt2 = subht(subset2, k, smallest, largest); } void divsub1(points *pts, int j, points a, points b, points c) { int j2, k2, flag = 0; double m1, c1, m2, c2, h_m1, h_m2, c_m2, temp_c1, temp_c2, temp_c3; points subsubset1[100], subsubset2[100]; m1 = (double) (c.y - a.y) / (c.x - a.x); //slope of the line c1 = (double) a.y - (m1 * a.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m1, c1); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", a, c); m2 = (double) (c.y - b.y) / (c.x - b.x); //slope of the line c2 = (double) b.y - (m2 * b.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m2, c2); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", b, c); h_m1 = (b.y - a.y) / (b.x - a.x); h_m2 = -1 / (h_m1); c_m2 = (double) c.y - (h_m2 * c.x); printf("Slope of the line %.2f \nConstant of the line %.2f \n", h_m2, c_m2); j2 = 0; k2 = 0; printf("J:%d\n\n", j); #pragma acc parallel loop for (i = 0; i < j; i++) { temp_c3 = pts[i].y - (h_m2 * pts[i].x); //line hc temp_c2 = pts[i].y - (m2 * pts[i].x); //line bc temp_c1 = pts[i].y - (m1 * pts[i].x); //y=mx+c line a printf("point is:(%2.f,%2.f)\n", pts[i]); if ((pts[i].x - c.x) != 0) //infinte line { if (h_m2 > 0) //positive slope { if (c_m2 - temp_c3 < 0) //above the + line { printf("1-111111111\n"); if (c1 - temp_c1 < 0) //above the line { printf("1-222222\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; //flag=1; } } else if (c_m2 - temp_c3 > 0) //below the line +line { printf("1-3333333\n"); { if (m2 < 0) //negative slope { if (c2 - temp_c2 < 0) //above the line { printf("1-44444111111\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; //flag=1; } } else if (m2 > 0) //positive slope { if (c2 - temp_c2 > 0) //below the line { printf("1-4444422222\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; //flag=1; } } } /*else { */ } } else if (h_m2 < 0) //negative slope { if (c_m2 - temp_c3 > 0) //below the line -ve line { printf("1-55555555\n"); if (m1 > 0) //positive slope { if (c1 - temp_c1 < 0) //above the line { printf("1-66666666\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (m1 < 0) { if (c1 - temp_c1 > 0) //below the line { printf("1-66666666\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } } else if (c_m2 - temp_c3 < 0) //above the line -ve line { printf("1-777777777\n"); if (c2 - temp_c2 < 0) //above the line { printf("1-88888888\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } else if (h_m2 == 0) 
//horizontal line { if (c.y - pts[i].y < 0) //above the horizontal line { if (c1 - temp_c1 < 0) //above the line { printf("1-999999\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.y - pts[i].y < 0) //below the horizontal line { if (c2 - temp_c2 > 0) //below the line { printf("1-1010101\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } else { if (c.x - pts[i].x < 0) //above the vertical line { if (c1 - temp_c1 < 0) //above { printf("1-12121212\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.x - pts[i].x > 0) //below the vertical line { if (c2 - temp_c2 > 0) //below { printf("1-13131313\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } if (j2 != 0) { printf(" subset1:\n"); for (i = 0; i < j2; i++) printf("(%.2f,%.2f)\n", subsubset1[i]); pt11 = subht(subsubset1, j2, a, c); divsub1(subsubset1, j2, a, c, pt11); } else { draw(a, c); } if (k2 != 0) { printf(" subset2:\n"); for (i = 0; i < k2; i++) printf("(%.2f,%.2f)\n", subsubset2[i]); pt22 = subht(subsubset2, k2, c, b); divsub1(subsubset2, k2, c, b, pt22); } else { draw(c, b); } } void divsub2(points *pts, int k, points a, points b, points c) { int j2, k2; double m1, c1, m2, c2, h_m1, h_m2, c_m2, temp_c1, temp_c2, temp_c3; points subsubset1[100], subsubset2[100]; m1 = (double) (c.y - a.y) / (c.x - a.x); //slope of the line c1 = (double) a.y - (m1 * a.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m1, c1); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", a, c); m2 = (double) (c.y - b.y) / (c.x - b.x); //slope of the line c2 = (double) b.y - (m2 * b.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %f \n", m2, c2); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", b, c); h_m1 = (b.y - a.y) / (b.x - a.x); h_m2 = -1 / (h_m1); c_m2 = (double) c.y - (h_m2 * c.x); printf("Slope of the line %.2f \nConstant of the line %.2f \n", h_m2, c_m2); j2 = 0; k2 = 0; for (i = 0; i < k; i++) { temp_c3 = pts[i].y - (h_m2 * pts[i].x); //line hc temp_c2 = pts[i].y - (m2 * pts[i].x); //line bc temp_c1 = pts[i].y - (m1 * pts[i].x); //y=mx+c line ac printf("temp_c2,(pt),index==%f,(%.2f,%.2f),%d\n", temp_c2, pts[i], i); if ((pts[i].x - c.x) != 0) //infinte line { if (h_m2 > 0) //positive slope { if (c_m2 - temp_c3 < 0) //above the +line { printf("2-11111 \n"); if (c1 - temp_c1 > 0) //below the line { printf("2-222222 \n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c_m2 - temp_c3 > 0) //below the +line { printf("2-333333 \n"); if (c2 - temp_c2 > 0) //below the line { printf("2-44444444444\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } else if (h_m2 < 0) //negative slope { printf("2-55555555 \n"); if (c_m2 - temp_c3 > 0) //below the -line { printf("2-66666 \n"); if (c1 - temp_c1 > 0) //below the line { subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c_m2 - temp_c3 < 0) //above the -line { printf("2-77777777 \n"); if (m2 < 0) //negative slope { if (c2 - temp_c2 < 0) //above the line { printf("2-88881111\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } else if (m2 > 0) //positive slope { if (c2 - temp_c2 > 0) //below the line { printf("2-888822222\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } else if (h_m2 == 0) //horizontal line { if (c.y - pts[i].y > 0) //below the horizontal line { if (c1 - temp_c1 > 0) //below the line { printf("2-999999\n"); 
subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.y - pts[i].y < 0) //above the horizontal line { if (c2 - temp_c2 > 0) //below the line { printf("2-1010101\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } else { if (c.x - pts[i].x > 0) //below the vertical line { if (c1 - temp_c1 > 0) //below the line { printf("2-12121212\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.x - pts[i].x < 0) //above the vertical line { if (c2 - temp_c2 < 0) //above the line { printf("1-13131313\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } printf("j2:%d\n", j2); printf("k2:%d\n", k2); if (j2 != 0) { printf("Subsubset1:\n"); for (i = 0; i < j2; i++) printf("(%.2f,%.2f)\n", subsubset1[i]); pt11 = subht(subsubset1, j2, a, c); divsub2(subsubset1, j2, a, c, pt11); } else { draw(a, c); } if (k2 != 0) { printf("Subsubset2:\n"); for (i = 0; i < k2; i++) printf("(%.2f,%.2f)\n", subsubset2[i]); pt22 = subht(subsubset2, k2, c, b); divsub2(subsubset2, k2, c, b, pt22); } else { draw(c, b); } } int main1() { /*clock_t start = clock(), stop, t;*/ double elapsed, xx = 0, yy = 0; //clock_t t; /*int i,ti; t=clock(); ti=(unsigned int)t; for(i=0;i<50;i++) { ti++; srand(ti); xx= rand()%50+1; yy= rand()%50+1; pts[i].x=xx; pts[i].y=yy; } */ float elapsedTime; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); n = (sizeof(pts) / sizeof(double)) / 2; smallest = minimum(pts, n); largest = maximum(pts, n, smallest); division(pts, n, smallest, largest); divsub1(subset1, j, smallest, largest, pt1); divsub2(subset2, k, smallest, largest, pt2); printf("final hull"); for (i = 0; i < f_r; i++) printf("(%.2f,%.2f)\n", result[i]); hipDeviceSynchronize(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime,start,stop); printf("Time Taken = %f ms",elapsedTime); //stop = clock(); //elapsed = (double) (stop - start) * 1000.0 / CLOCKS_PER_SEC; //printf("Time elapsed in ms: %.2f", elapsed); return 0; } void keys(unsigned char key, int x, int y) { /*if(key=='s') { screen1=1; glutPostRedisplay(); }*/ if (key == 'a') { screen2 = 1; glutPostRedisplay(); } /*if(key=='i') { input=1; glutPostRedisplay(); }*/ } void mydisplay() { glClear(GL_COLOR_BUFFER_BIT); /*if(screen1==1) { show(pts,n); screen1=0; }*/ if (screen2 == 1) { //show(pts,n); main1(); screen2 = 0; } /*if(input==1) { getRandomPoints(20,25.00,25.00); input=0; }*/ //screen=0; glFlush(); } void init() { glMatrixMode(GL_PROJECTION); glLoadIdentity(); glPointSize(4.0); gluOrtho2D(-60, 60, -60, 60); glClearColor(1.0, 1.0, 1.0, 1.0); glColor3f(0.0, 0.0, 0.0); glMatrixMode(GL_MODELVIEW); // glLoadIdentity(); } int main(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB); glutInitWindowSize(600, 600); glutInitWindowPosition(10, 10); glutCreateWindow("simple"); glutKeyboardFunc(keys); glutDisplayFunc(mydisplay); init(); glutMainLoop(); }
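division, divsub1 and divsub2 above classify points against a splitting line through its slope and intercept, which forces separate branches for vertical and horizontal lines. As an aside, the helper below sketches the usual cross-product form of the same test, which needs no special cases; side_of_line is a name introduced here, not something either file defines, and it relies only on the points struct declared above.

// Sign of the 2-D cross product (b - a) x (p - a): positive when p lies to
// the left of the directed segment a->b, negative to the right, zero on the
// line. This avoids the slope/intercept special-casing for vertical and
// horizontal splitting lines used in divsub1/divsub2 above.
__host__ __device__ double side_of_line(points a, points b, points p)
{
    return (b.x - a.x) * (p.y - a.y) - (b.y - a.y) * (p.x - a.x);
}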
09a2652d8a4c10b7319c776c520747d64d50de40.cu
#include<time.h> #include<stdlib.h> #include<stdio.h> #include<math.h> #include<GL/glut.h> #include <cuda.h> //#include "timer.h" #define pi 22/7; typedef struct { double x; double y; } points; typedef struct { double x; double y; double largest; } kernel_shared_type; //points pts[100]; //test case 1 points pts[] = { 3.0, 3.0, 4.0, 6.0, 1.0, 2.0, 5.0, 4.0, 7.0, 3.0, 6.0, 6.0, 1.0, 1.0, 2.0, 1.0, 6.0, 7.0, 8.0, 8.0, 5.0, 5.0, 9.0, 4.0, 7.0, 5.0, 1.0, 10.0, 2.0, 7.0, 5.0, 2.0, 2.0, 4.0, 8.0, 6.0, 3.0, 6.0, 4.0, 4.0, 6.0, 8.0, 5.0, 10.0, 1.0, 5.0, 4.0, 3.0, 8.0, 7.0, -2.0, 20.0, 9.0, 0.0, -29.0, -17.0, 17.0, 29.0, 2.0, 3.0, 16.0, 7.0, 19.0, 7.0, 8.0, 9.0, 19.0, 25.0, 23.0, 4.0, 17.0, 27.0, 18.0, 26.0, 6.0, 4.0, 12.0, 28.0, 5.0, 25.0, 2.0, 6.0, 28.0, 28.0, 20.0, 15.0, 16.0, 18.0, 8.0, 12.0, 7.0, 15.0, 3.0, 2.0, 9.0, 1.0, 14.0, 5.0, 4.0, 17.0, 2.0, 18.0, 7.0, 7.0, 14.0, 9.0, -19.0, -23.0, 12.0, -27.0, -15.0, 12, -29, -29, -20, 16, 25.0, 20.0, 14.0, 9.0, 1.0, 3.0, 9.0, 22.0, 6.0, 2.0, 7.0, 14.0, 19.0, 1.0, 17.0, 14.0, 8.0, 15.0, 25.0, 2.0, 18.0, 1.0, 3.0, 18.0 }; //test case 2 //points pts[]={-2.0,3.0,-15.0,-2.0,20.0,1.50,6.0,1.0,14.0,0.0,4.0,-2.30,-8.0,1.0,14.0,3.0,-24.0,-3.0,18.0,-2.0,-5.0,3.0,-36.0,23.0,7.0,6.0,16.0,21.0,32.0,15.0,42.0,19.0,22.0,33.0,25.0,35.0,39.0,29.0,28.0,49.0,43.0,17.0,28.0,20.0,37.0,27.0,23.0,34.0,32.0,13.0,36.0,16.0,24.0,35.0,39.0,29.0,15.0,26.0,43.0,34.0,12.0,32.0,31.0,21.0,35.0}; //horizontal points //test case 3 //points pts[]={2.0,27.0,1.0,-29.0,0.25,-15.62,-0.0,16.45,-1.25,6.0,-1.5,16.0,-1.25,18.0,-1.0,18.75,-2.0,15.0,-1.60,18.0,2.0,-29.0,1.0,-23.0,-2.0,-17.0,-2.0,24.0};//vertical lines //test case 4 //points pts[]={1.0,7.0,23.0,46.0,27.0,16.0,37.0,18.0,9.0,2.0,35.0,18.0,27.0,21.0,7.0,2.0,23.0,32.0,6.0,3.0,25.0,6.0,28.0,17.0,5.0,38.0,48.0,43.0,28.0,27.0,34.0,47.0,39.0,40.0,35.0,8.0,34.0,32.0,3.0,4.0,3.0,5.0,16.0,9.0,35.0,24.0,1.0,24.0,46.0,29.0,43.0,15.0,32.0,5.0,17.0,16.0,26.0,48.0,29.0,30.0,36.0,24.0,4.0,3.0,12.0,12.0,5.0,8.0,27.0,46.0}; points pt1, pt2, pt11, pt22, smallest, largest; int i, j, k, r = 0, l2 = 0, count = 0, f_r = 0, screen1 = 0, screen2 = 0, n, input = 0; //int n; double m, c, temp_c, xx, yy; //m1,c1,m2,c2,temp_c,temp_c1,temp_c2,temp_c3,c_m2,h_m1,h_m2; points subset1[100], subset2[100], result[100]; //,subsubset1[100],subsubset2[100],result[100];; /*void getRandomPoints(int n,int xmax,int ymax) { for (i = 0; i < n; i++) { xx=rand()%ymax; yy=rand()%xmax; pts[i].x=xx; pts[i].y=yy; } //points.push( [ xMax /4 + r * Math.cos(theta), yMax/2 + 2 * r * Math.sin(theta) ] ) } /* var phase = Math.random() * Math.PI * 2; for (var i = 0; i < numPoint/2; i++) { var r = Math.random()*xMax/4; var theta = Math.random() * 1.5 * Math.PI + phase; points.push( [ xMax /4 * 3 + r * Math.cos(theta), yMax/2 + r * Math.sin(theta) ] ) }*/ /*void show(points pts[],int n) { int i; printf("N:%d \n",n);*/ void draw(points a, points c) { glColor3f(0.0, 0.0, 0.0); for (i = 0; i < n; i++) { glBegin(GL_POINTS); glVertex2d(pts[i].x, pts[i].y); glEnd(); } glBegin(GL_LINES); { glVertex2d(a.x, a.y); glVertex2d(c.x, c.y); } glEnd(); } points minimum(points *pts, int n) { points smallest_x = pts[0]; //pt with smallest x // points smallest_y=pts[0]; //pt wit smallest y points smallest; printf("The Points are: \n"); for (i = 0; i < n; i++) printf("(%.2f,%.2f)\n", pts[i]); for (i = 0; i < n; i++) { if (pts[i].x < smallest_x.x) smallest_x = pts[i]; else if (pts[i].x == smallest_x.x) { if (pts[i].y > smallest_x.y) smallest_x = pts[i]; } //if(pts[i].y < smallest_y.y) //smallest_y=pts[i]; } 
printf("(%.2f,%.2f) is smallest x \n", smallest_x); //printf("(%.2f,%.2f) is smallest y \n",smallest_y); //if((smallest_x.x < smallest_y.x)&&(smallest_x.y < smallest_y.y)) //compares the smallest in x & y and finalize the smallest smallest = smallest_x; //else //smallest=smallest_y; //show(pts,n); printf("(%.2f,%.2f) is smallest \n", smallest); result[f_r++] = smallest; return smallest; } points maximum(points *pts, int n, points smallest) { double distance, largest_distance = 0.0; //finding the largest distance pt double dx, dy; points largest; for (i = 0; i < n; i++) { dx = smallest.x - pts[i].x; //difference in x dy = smallest.y - pts[i].y; //difference in y distance = sqrt((dx * dx) + (dy * dy)); //finding the distance using distance formula if (largest_distance < distance) { largest_distance = distance; largest.x = pts[i].x; largest.y = pts[i].y; } } printf("%.2f is largest distance\n", largest_distance); printf("(%.2f,%.2f) is largest point\n", largest); result[f_r++] = largest; return largest; } __global__ void subhtKernel(double *d_ab, points *pt, points *a, points *b, points *c, int *limit, double *largest) { extern __shared__ kernel_shared_type sdata[]; unsigned int tid = threadIdx.x; if (tid < *limit) { double dx, dy, t_ac, t_bc, s, area; double h; dx = a->x - c[tid].x; dy = a->y - c[tid].y; t_ac = sqrt((dx * dx) + (dy * dy)); dx = b->x - c[tid].x; dy = b->y - c[tid].y; t_bc = sqrt((dx * dx) + (dy * dy)); s = (*d_ab + t_ac + t_bc) / 2; area = sqrt(s * (s - *d_ab) * (s - t_ac) * (s - t_bc)); h = (area * 2) / *d_ab; sdata[tid].largest = h; sdata[tid].x = c[tid].x; sdata[tid].y = c[tid].y; __syncthreads(); for (unsigned int s = *limit / 2; s >= 1; s = s / 2) { if (tid < s) { if (sdata[tid].largest < sdata[tid + s].largest) { sdata[tid].largest = sdata[tid + s].largest; sdata[tid].x = sdata[tid + s].x; sdata[tid].y = sdata[tid + s].y; } } __syncthreads(); } __syncthreads(); if (tid == 0) { *largest = sdata[0].largest; pt->x = sdata[0].x; pt->y = sdata[0].y; } } } points subht(points *c, int j, points a, points b) { double dx, dy, d_ab, t_ac, t_bc, area, s, largest_h; points pt; dx = a.x - b.x; //difference in x dy = a.y - b.y; //difference in y d_ab = sqrt((dx * dx) + (dy * dy)); //finding the distance using distance formula double *d_d_ab, *d_largest_h; int *d_j; points *d_pt, *d_a, *d_b, *d_c; cudaMalloc((void**) &d_d_ab, sizeof(double)); cudaMalloc((void**) &d_largest_h, sizeof(double)); cudaMalloc((void**) &d_j, sizeof(int)); cudaMalloc((void**) &d_pt, sizeof(points)); cudaMalloc((void**) &d_a, sizeof(points)); cudaMalloc((void**) &d_b, sizeof(points)); cudaMalloc((void**) &d_c, sizeof(points) * 100); cudaMemcpy(d_d_ab, &d_ab, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_j, &j, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_a, &a, sizeof(points), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, sizeof(points), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, sizeof(points) * 100, cudaMemcpyHostToDevice); unsigned int shared_size = sizeof(points) * j * j; subhtKernel<<<1, j, shared_size>>>(d_d_ab, d_pt, d_a, d_b, d_c, d_j, d_largest_h); cudaDeviceSynchronize(); cudaMemcpy(&largest_h, d_largest_h, sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(&pt, d_pt, sizeof(points), cudaMemcpyDeviceToHost); cudaFree(d_d_ab); cudaFree(d_largest_h); cudaFree(d_j); cudaFree(d_pt); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); printf("The maximum height is %.2f and point is (%.2f,%.2f)\n", largest_h, pt); result[f_r++] = pt; return pt; } void division(points *pts, int n, points smallest, 
points largest) { m = (double) (largest.y - smallest.y) / (largest.x - smallest.x); //slope of the line c = (double) smallest.y - (m * smallest.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m, c); printf("(%.2f,%.2f) (%.2f,%.2f)\n", smallest, largest); j = 0; k = 0; #pragma acc parallel loop for (i = 0; i < n; i++) { temp_c = pts[i].y - (m * pts[i].x); //y=mx+c if (c - temp_c < 0) //points above the line { subset1[j].x = pts[i].x; subset1[j].y = pts[i].y; j++; } else if (c - temp_c > 0) //points below the line { subset2[k].x = pts[i].x; subset2[k].y = pts[i].y; k++; } } printf("Subset1:\n"); for (i = 0; i < j; i++) printf("(%.2f,%.2f)\n", subset1[i]); printf("j old:%d\n\n", j); printf("k old:%d\n\n", k); printf("Subset2:\n"); for (i = 0; i < k; i++) printf("(%.2f,%.2f)\n", subset2[i]); pt1 = subht(subset1, j, smallest, largest); pt2 = subht(subset2, k, smallest, largest); } void divsub1(points *pts, int j, points a, points b, points c) { int j2, k2, flag = 0; double m1, c1, m2, c2, h_m1, h_m2, c_m2, temp_c1, temp_c2, temp_c3; points subsubset1[100], subsubset2[100]; m1 = (double) (c.y - a.y) / (c.x - a.x); //slope of the line c1 = (double) a.y - (m1 * a.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m1, c1); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", a, c); m2 = (double) (c.y - b.y) / (c.x - b.x); //slope of the line c2 = (double) b.y - (m2 * b.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m2, c2); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", b, c); h_m1 = (b.y - a.y) / (b.x - a.x); h_m2 = -1 / (h_m1); c_m2 = (double) c.y - (h_m2 * c.x); printf("Slope of the line %.2f \nConstant of the line %.2f \n", h_m2, c_m2); j2 = 0; k2 = 0; printf("J:%d\n\n", j); #pragma acc parallel loop for (i = 0; i < j; i++) { temp_c3 = pts[i].y - (h_m2 * pts[i].x); //line hc temp_c2 = pts[i].y - (m2 * pts[i].x); //line bc temp_c1 = pts[i].y - (m1 * pts[i].x); //y=mx+c line a printf("point is:(%2.f,%2.f)\n", pts[i]); if ((pts[i].x - c.x) != 0) //infinte line { if (h_m2 > 0) //positive slope { if (c_m2 - temp_c3 < 0) //above the + line { printf("1-111111111\n"); if (c1 - temp_c1 < 0) //above the line { printf("1-222222\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; //flag=1; } } else if (c_m2 - temp_c3 > 0) //below the line +line { printf("1-3333333\n"); { if (m2 < 0) //negative slope { if (c2 - temp_c2 < 0) //above the line { printf("1-44444111111\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; //flag=1; } } else if (m2 > 0) //positive slope { if (c2 - temp_c2 > 0) //below the line { printf("1-4444422222\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; //flag=1; } } } /*else { */ } } else if (h_m2 < 0) //negative slope { if (c_m2 - temp_c3 > 0) //below the line -ve line { printf("1-55555555\n"); if (m1 > 0) //positive slope { if (c1 - temp_c1 < 0) //above the line { printf("1-66666666\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (m1 < 0) { if (c1 - temp_c1 > 0) //below the line { printf("1-66666666\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } } else if (c_m2 - temp_c3 < 0) //above the line -ve line { printf("1-777777777\n"); if (c2 - temp_c2 < 0) //above the line { printf("1-88888888\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } else if (h_m2 == 0) //horizontal line { if (c.y - pts[i].y < 0) //above the horizontal line { if (c1 - temp_c1 < 
0) //above the line { printf("1-999999\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.y - pts[i].y < 0) //below the horizontal line { if (c2 - temp_c2 > 0) //below the line { printf("1-1010101\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } else { if (c.x - pts[i].x < 0) //above the vertical line { if (c1 - temp_c1 < 0) //above { printf("1-12121212\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.x - pts[i].x > 0) //below the vertical line { if (c2 - temp_c2 > 0) //below { printf("1-13131313\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } if (j2 != 0) { printf(" subset1:\n"); for (i = 0; i < j2; i++) printf("(%.2f,%.2f)\n", subsubset1[i]); pt11 = subht(subsubset1, j2, a, c); divsub1(subsubset1, j2, a, c, pt11); } else { draw(a, c); } if (k2 != 0) { printf(" subset2:\n"); for (i = 0; i < k2; i++) printf("(%.2f,%.2f)\n", subsubset2[i]); pt22 = subht(subsubset2, k2, c, b); divsub1(subsubset2, k2, c, b, pt22); } else { draw(c, b); } } void divsub2(points *pts, int k, points a, points b, points c) { int j2, k2; double m1, c1, m2, c2, h_m1, h_m2, c_m2, temp_c1, temp_c2, temp_c3; points subsubset1[100], subsubset2[100]; m1 = (double) (c.y - a.y) / (c.x - a.x); //slope of the line c1 = (double) a.y - (m1 * a.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %.2f \n", m1, c1); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", a, c); m2 = (double) (c.y - b.y) / (c.x - b.x); //slope of the line c2 = (double) b.y - (m2 * b.x); //constant of the line printf("Slope of the line %.2f \nConstant of the line %f \n", m2, c2); printf("(%.2f,%.2f) , (%.2f,%.2f)\n", b, c); h_m1 = (b.y - a.y) / (b.x - a.x); h_m2 = -1 / (h_m1); c_m2 = (double) c.y - (h_m2 * c.x); printf("Slope of the line %.2f \nConstant of the line %.2f \n", h_m2, c_m2); j2 = 0; k2 = 0; for (i = 0; i < k; i++) { temp_c3 = pts[i].y - (h_m2 * pts[i].x); //line hc temp_c2 = pts[i].y - (m2 * pts[i].x); //line bc temp_c1 = pts[i].y - (m1 * pts[i].x); //y=mx+c line ac printf("temp_c2,(pt),index==%f,(%.2f,%.2f),%d\n", temp_c2, pts[i], i); if ((pts[i].x - c.x) != 0) //infinte line { if (h_m2 > 0) //positive slope { if (c_m2 - temp_c3 < 0) //above the +line { printf("2-11111 \n"); if (c1 - temp_c1 > 0) //below the line { printf("2-222222 \n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c_m2 - temp_c3 > 0) //below the +line { printf("2-333333 \n"); if (c2 - temp_c2 > 0) //below the line { printf("2-44444444444\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } else if (h_m2 < 0) //negative slope { printf("2-55555555 \n"); if (c_m2 - temp_c3 > 0) //below the -line { printf("2-66666 \n"); if (c1 - temp_c1 > 0) //below the line { subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c_m2 - temp_c3 < 0) //above the -line { printf("2-77777777 \n"); if (m2 < 0) //negative slope { if (c2 - temp_c2 < 0) //above the line { printf("2-88881111\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } else if (m2 > 0) //positive slope { if (c2 - temp_c2 > 0) //below the line { printf("2-888822222\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } else if (h_m2 == 0) //horizontal line { if (c.y - pts[i].y > 0) //below the horizontal line { if (c1 - temp_c1 > 0) //below the line { printf("2-999999\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.y - pts[i].y < 0) 
//above the horizontal line { if (c2 - temp_c2 > 0) //below the line { printf("2-1010101\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } else { if (c.x - pts[i].x > 0) //below the vertical line { if (c1 - temp_c1 > 0) //below the line { printf("2-12121212\n"); subsubset1[j2].x = pts[i].x; subsubset1[j2].y = pts[i].y; j2++; } } else if (c.x - pts[i].x < 0) //above the vertical line { if (c2 - temp_c2 < 0) //above the line { printf("1-13131313\n"); subsubset2[k2].x = pts[i].x; subsubset2[k2].y = pts[i].y; k2++; } } } } printf("j2:%d\n", j2); printf("k2:%d\n", k2); if (j2 != 0) { printf("Subsubset1:\n"); for (i = 0; i < j2; i++) printf("(%.2f,%.2f)\n", subsubset1[i]); pt11 = subht(subsubset1, j2, a, c); divsub2(subsubset1, j2, a, c, pt11); } else { draw(a, c); } if (k2 != 0) { printf("Subsubset2:\n"); for (i = 0; i < k2; i++) printf("(%.2f,%.2f)\n", subsubset2[i]); pt22 = subht(subsubset2, k2, c, b); divsub2(subsubset2, k2, c, b, pt22); } else { draw(c, b); } } int main1() { /*clock_t start = clock(), stop, t;*/ double elapsed, xx = 0, yy = 0; //clock_t t; /*int i,ti; t=clock(); ti=(unsigned int)t; for(i=0;i<50;i++) { ti++; srand(ti); xx= rand()%50+1; yy= rand()%50+1; pts[i].x=xx; pts[i].y=yy; } */ float elapsedTime; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); n = (sizeof(pts) / sizeof(double)) / 2; smallest = minimum(pts, n); largest = maximum(pts, n, smallest); division(pts, n, smallest, largest); divsub1(subset1, j, smallest, largest, pt1); divsub2(subset2, k, smallest, largest, pt2); printf("final hull"); for (i = 0; i < f_r; i++) printf("(%.2f,%.2f)\n", result[i]); cudaThreadSynchronize(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime,start,stop); printf("Time Taken = %f ms",elapsedTime); //stop = clock(); //elapsed = (double) (stop - start) * 1000.0 / CLOCKS_PER_SEC; //printf("Time elapsed in ms: %.2f", elapsed); return 0; } void keys(unsigned char key, int x, int y) { /*if(key=='s') { screen1=1; glutPostRedisplay(); }*/ if (key == 'a') { screen2 = 1; glutPostRedisplay(); } /*if(key=='i') { input=1; glutPostRedisplay(); }*/ } void mydisplay() { glClear(GL_COLOR_BUFFER_BIT); /*if(screen1==1) { show(pts,n); screen1=0; }*/ if (screen2 == 1) { //show(pts,n); main1(); screen2 = 0; } /*if(input==1) { getRandomPoints(20,25.00,25.00); input=0; }*/ //screen=0; glFlush(); } void init() { glMatrixMode(GL_PROJECTION); glLoadIdentity(); glPointSize(4.0); gluOrtho2D(-60, 60, -60, 60); glClearColor(1.0, 1.0, 1.0, 1.0); glColor3f(0.0, 0.0, 0.0); glMatrixMode(GL_MODELVIEW); // glLoadIdentity(); } int main(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB); glutInitWindowSize(600, 600); glutInitWindowPosition(10, 10); glutCreateWindow("simple"); glutKeyboardFunc(keys); glutDisplayFunc(mydisplay); init(); glutMainLoop(); }
6caebffb4f7a115a9c7ce58e6b55bb393bd38dc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "my_device_function.cuh" __global__ void sigmoid(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = 1/(1+expf(-z[tid])); tid+= blockDim.x * gridDim.x; } } __global__ void sigmoid_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = (1/(1+expf(-z[tid])))*(1 - 1/(1+expf(-z[tid]))); tid+= blockDim.x * gridDim.x; } } __global__ void relu(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = z[tid]; else a[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void relu_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 1.0; else a[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void elu(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = z[tid]; else a[tid] = expf(z[tid]) - 1; tid+= blockDim.x * gridDim.x; } } __global__ void elu_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 1.0; else a[tid] = expf(z[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void new_activation(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 2*logf(z[tid] + 1); else a[tid] = -2*logf(-z[tid] + 1); tid+= blockDim.x * gridDim.x; } } __global__ void new_activation_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 2/(1+z[tid]); else a[tid] = 2/(-z[tid]+1); tid+= blockDim.x * gridDim.x; } } __global__ void deliver_front_to_rear(float *front,float *rear,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { rear[tid] = front[tid]; tid+= blockDim.x * gridDim.x; } } __global__ void add_bias(float *z,float *b,long column,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { z[tid] += b[tid % column]; tid+= blockDim.x * gridDim.x; } } __global__ void last_delta_before_transpose(float *temp, float *y,float *T,long batch_size,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { temp[tid] = (y[tid]-T[tid])/(2*batch_size); tid+= blockDim.x * gridDim.x; } } __global__ void transpose(float *after, float *before,long before_columns,long before_rows) { long tid = blockIdx.x*blockDim.x + threadIdx.x; long x,y; while(tid < before_columns*before_rows) { y = tid % before_columns; x = tid / before_columns; after[IDX2C(x,y,before_rows)] = before[IDX2C(y,x,before_columns)]; tid+= blockDim.x * gridDim.x; } } __global__ void basic_multi(float *a,float *b,float *c, long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { c[tid] = a[tid]*b[tid]; tid+= blockDim.x * gridDim.x; } } __global__ void loss_cross_entropy(float *target,float *y, float * result,long last_neural,long batch_size) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < last_neural*batch_size) { result[tid] = -0.5*(target[tid]*logf(y[tid] + 0.000000001) + (1.0 - target[tid])*logf(1-y[tid] + 0.000000001)); tid+= blockDim.x * gridDim.x; } } __global__ void matching(float *target,float *y, float * result,long last_neural,long batch_size) { long tid = blockIdx.x*blockDim.x + threadIdx.x; int target_inx; int y_inx; while(tid < batch_size) { float max = 0.0; for(int i = 0 ; i < 
last_neural ; i++) { if(target[IDX2C(i,tid,last_neural)] > 0.9) { target_inx = i; } if(y[IDX2C(i,tid,last_neural)] > max) { max = y[IDX2C(i,tid,last_neural)]; y_inx = i; } } if(target_inx == y_inx) result[tid] = 1.0; else result[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void weight_update(float *w,float *delta_w, float alpha,float ramda,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { w[tid] = w[tid] - alpha*(delta_w[tid] + ramda*w[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void bias_update(float *b,float *delta_b, float alpha,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { b[tid] = b[tid] - alpha*delta_b[tid]; tid+= blockDim.x * gridDim.x; } } __global__ void init_zeros(float *a, long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void init_ones(float *a, long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = 1.0; tid+= blockDim.x * gridDim.x; } } __global__ void adam_mean(float *adam_mean, float* delta, float beta1,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { adam_mean[tid] = (beta1*adam_mean[tid] + (1.0- beta1)*delta[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void adam_var(float *adam_var, float* delta, float beta2,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { adam_var[tid] = (beta2*adam_var[tid] + (1.0- beta2)*delta[tid]*delta[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void adam_sum(float *result, float *adam_mean,float *adam_var,float beta1_t,float beta2_t,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { result[tid] = (adam_mean[tid]/(1-beta1_t))/(sqrtf(adam_var[tid]/(1-beta2_t)) + 0.00000001); tid+= blockDim.x * gridDim.x; } } __global__ void maxnorm_constraints(float *a, float max,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(a[tid] > max) a[tid] = max; else if(a[tid] < -max) a[tid] = -max; tid+= blockDim.x * gridDim.x; } } __global__ void inverted_dropout(float *dropout,float *probability, float dropout_rate,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(probability[tid] > dropout_rate) dropout[tid] = 0.0; else dropout[tid] = 1.0/dropout_rate; tid+= blockDim.x * gridDim.x; } }
6caebffb4f7a115a9c7ce58e6b55bb393bd38dc2.cu
#include "my_device_function.cuh" __global__ void sigmoid(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = 1/(1+expf(-z[tid])); tid+= blockDim.x * gridDim.x; } } __global__ void sigmoid_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = (1/(1+expf(-z[tid])))*(1 - 1/(1+expf(-z[tid]))); tid+= blockDim.x * gridDim.x; } } __global__ void relu(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = z[tid]; else a[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void relu_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 1.0; else a[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void elu(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = z[tid]; else a[tid] = expf(z[tid]) - 1; tid+= blockDim.x * gridDim.x; } } __global__ void elu_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 1.0; else a[tid] = expf(z[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void new_activation(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 2*logf(z[tid] + 1); else a[tid] = -2*logf(-z[tid] + 1); tid+= blockDim.x * gridDim.x; } } __global__ void new_activation_inv(float *a,float *z,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(z[tid] > 0 ) a[tid] = 2/(1+z[tid]); else a[tid] = 2/(-z[tid]+1); tid+= blockDim.x * gridDim.x; } } __global__ void deliver_front_to_rear(float *front,float *rear,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { rear[tid] = front[tid]; tid+= blockDim.x * gridDim.x; } } __global__ void add_bias(float *z,float *b,long column,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { z[tid] += b[tid % column]; tid+= blockDim.x * gridDim.x; } } __global__ void last_delta_before_transpose(float *temp, float *y,float *T,long batch_size,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { temp[tid] = (y[tid]-T[tid])/(2*batch_size); tid+= blockDim.x * gridDim.x; } } __global__ void transpose(float *after, float *before,long before_columns,long before_rows) { long tid = blockIdx.x*blockDim.x + threadIdx.x; long x,y; while(tid < before_columns*before_rows) { y = tid % before_columns; x = tid / before_columns; after[IDX2C(x,y,before_rows)] = before[IDX2C(y,x,before_columns)]; tid+= blockDim.x * gridDim.x; } } __global__ void basic_multi(float *a,float *b,float *c, long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { c[tid] = a[tid]*b[tid]; tid+= blockDim.x * gridDim.x; } } __global__ void loss_cross_entropy(float *target,float *y, float * result,long last_neural,long batch_size) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < last_neural*batch_size) { result[tid] = -0.5*(target[tid]*logf(y[tid] + 0.000000001) + (1.0 - target[tid])*logf(1-y[tid] + 0.000000001)); tid+= blockDim.x * gridDim.x; } } __global__ void matching(float *target,float *y, float * result,long last_neural,long batch_size) { long tid = blockIdx.x*blockDim.x + threadIdx.x; int target_inx; int y_inx; while(tid < batch_size) { float max = 0.0; for(int i = 0 ; i < last_neural ; i++) { if(target[IDX2C(i,tid,last_neural)] > 0.9) { target_inx = i; } 
if(y[IDX2C(i,tid,last_neural)] > max) { max = y[IDX2C(i,tid,last_neural)]; y_inx = i; } } if(target_inx == y_inx) result[tid] = 1.0; else result[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void weight_update(float *w,float *delta_w, float alpha,float ramda,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { w[tid] = w[tid] - alpha*(delta_w[tid] + ramda*w[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void bias_update(float *b,float *delta_b, float alpha,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { b[tid] = b[tid] - alpha*delta_b[tid]; tid+= blockDim.x * gridDim.x; } } __global__ void init_zeros(float *a, long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = 0.0; tid+= blockDim.x * gridDim.x; } } __global__ void init_ones(float *a, long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { a[tid] = 1.0; tid+= blockDim.x * gridDim.x; } } __global__ void adam_mean(float *adam_mean, float* delta, float beta1,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { adam_mean[tid] = (beta1*adam_mean[tid] + (1.0- beta1)*delta[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void adam_var(float *adam_var, float* delta, float beta2,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { adam_var[tid] = (beta2*adam_var[tid] + (1.0- beta2)*delta[tid]*delta[tid]); tid+= blockDim.x * gridDim.x; } } __global__ void adam_sum(float *result, float *adam_mean,float *adam_var,float beta1_t,float beta2_t,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { result[tid] = (adam_mean[tid]/(1-beta1_t))/(sqrtf(adam_var[tid]/(1-beta2_t)) + 0.00000001); tid+= blockDim.x * gridDim.x; } } __global__ void maxnorm_constraints(float *a, float max,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(a[tid] > max) a[tid] = max; else if(a[tid] < -max) a[tid] = -max; tid+= blockDim.x * gridDim.x; } } __global__ void inverted_dropout(float *dropout,float *probability, float dropout_rate,long n) { long tid = blockIdx.x*blockDim.x + threadIdx.x; while(tid < n) { if(probability[tid] > dropout_rate) dropout[tid] = 0.0; else dropout[tid] = 1.0/dropout_rate; tid+= blockDim.x * gridDim.x; } }
f48948977da3daee2a4cbac4913f946a966926b1.hip
// !!! This is a file automatically generated by hipify!!!
// Build directly: nvcc conv.cu -o conv -std=c++11 `pkg-config opencv --cflags --libs` -lcudnn
// or use the Makefile: just run make
#include "opencv2/opencv.hpp"
#include <cudnn.h>

// http://www.goldsborough.me/cuda/ml/cudnn/c++/2017/10/01/14-37-23-convolutions_with_cudnn/

#define checkCUDNN(expression)                               \
  {                                                          \
    cudnnStatus_t status = (expression);                     \
    if (status != CUDNN_STATUS_SUCCESS) {                    \
      std::cerr << "Error on line " << __LINE__ << ": "      \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                               \
    }                                                        \
  }

cv::Mat load_image(const char* image_path) {
  cv::Mat image = cv::imread(image_path, cv::IMREAD_COLOR); // or CV_LOAD_IMAGE_COLOR
  image.convertTo(image, CV_32FC3); // image.type()=CV_32FC3; assigning to image.type() is an error, it cannot be modified (it is constant)
  cv::normalize(image, image, 0, 1, cv::NORM_MINMAX); // scale to the range 0.0 ~ 1.0
  return image;
}

void save_image(const char* output_filename, float* buffer, int height, int width) {
  cv::Mat output_image(height, width, CV_32FC3, buffer);
  // Make negative values zero.
  cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO);
  cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX); // saturate_cast<uchar>
  output_image.convertTo(output_image, CV_8UC3);
  cv::imwrite(output_filename, output_image);
}

int main(int argc, const char* argv[]) {
  if (argc < 2) {
    std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
    std::exit(EXIT_FAILURE);
  }

  int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
  std::cerr << "GPU: " << gpu_id << std::endl;

  bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
  std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;

  cv::Mat image = load_image(argv[1]);

  hipSetDevice(gpu_id); // select which GPU to use

  // create the cudnn handle
  cudnnHandle_t cudnn;
  cudnnCreate(&cudnn);

  // descriptor for the input tensor
  cudnnTensorDescriptor_t input_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
      /*format=*/CUDNN_TENSOR_NHWC, // note: NHWC; TensorFlow prefers NHWC tensors (the channel, i.e. BGR, varies fastest), while some other frameworks put the channel first
      /*dataType=*/CUDNN_DATA_FLOAT,
      /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));

  // descriptor for the convolution kernel (shape, format)
  cudnnFilterDescriptor_t kernel_descriptor;
  checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
  checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
      /*dataType=*/CUDNN_DATA_FLOAT,
      /*format=*/CUDNN_TENSOR_NCHW, // note: NCHW
      /*out_channels=*/3, /*in_channels=*/3,
      /*kernel_height=*/3, /*kernel_width=*/3));

  // descriptor for the convolution operation (stride, padding, etc.)
  cudnnConvolutionDescriptor_t convolution_descriptor;
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
      /*pad_height=*/1, /*pad_width=*/1,
      /*vertical_stride=*/1, /*horizontal_stride=*/1,
      /*dilation_height=*/1, /*dilation_width=*/1,
      /*mode=*/CUDNN_CROSS_CORRELATION, // CUDNN_CONVOLUTION
      /*computeType=*/CUDNN_DATA_FLOAT));

  // compute the dimensions of the image after the convolution
  int batch_size{ 0 }, channels{ 0 }, height{ 0 }, width{ 0 };
  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
      input_descriptor, kernel_descriptor,
      &batch_size, &channels, &height, &width));

  std::cerr << "Output Image: " << height << " x " << width << " x " << channels << std::endl;

  // descriptor for the convolution output tensor
  cudnnTensorDescriptor_t output_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
      /*format=*/CUDNN_TENSOR_NHWC,
      /*dataType=*/CUDNN_DATA_FLOAT,
      /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));

  // description of the convolution algorithm:
  // cudnn_tion_fwd_algo_gemm: models the convolution as an explicit matrix multiplication,
  // cudnn_tion_fwd_algo_fft: performs the convolution with a fast Fourier transform (FFT), or
  // cudnn_tion_fwd_algo_winograd: performs the convolution with the Winograd algorithm.
  cudnnConvolutionFwdAlgo_t convolution_algorithm;
  checkCUDNN(
      cudnnGetConvolutionForwardAlgorithm(cudnn,
          input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor,
          CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, // CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT (when memory is limited, set memoryLimitInBytes to a non-zero value)
          /*memoryLimitInBytes=*/0,
          &convolution_algorithm));

  // ask cuDNN how much memory the operation will need
  size_t workspace_bytes{ 0 };
  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
      input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor,
      convolution_algorithm, &workspace_bytes));
  std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;
  assert(workspace_bytes > 0);

  // *************************************************************************
  // allocate memory; the size comes from cudnnGetConvolutionForwardWorkspaceSize
  void* d_workspace{ nullptr };
  hipMalloc(&d_workspace, workspace_bytes);

  // computed from cudnnGetConvolution2dForwardOutputDim
  int image_bytes = batch_size * channels * height * width * sizeof(float);

  float* d_input{ nullptr };
  hipMalloc(&d_input, image_bytes);
  hipMemcpy(d_input, image.ptr<float>(0), image_bytes, hipMemcpyHostToDevice);

  float* d_output{ nullptr };
  hipMalloc(&d_output, image_bytes);
  hipMemset(d_output, 0, image_bytes);
  // *************************************************************************

  // clang-format off
  /*
  const float kernel_template[3][3] = {
    { 1, 1, 1 },
    { 1, -8, 1 },
    { 1, 1, 1 }
  };
  */
  const float kernel_template[3][3] = {
    { 0, 1, 0 },
    { 1, -4, 1 },
    { 0, 1, 0 }
  };
  // clang-format on

  float h_kernel[3][3][3][3]; // NCHW
  for (int kernel = 0; kernel < 3; ++kernel) {
    for (int channel = 0; channel < 3; ++channel) {
      for (int row = 0; row < 3; ++row) {
        for (int column = 0; column < 3; ++column) {
          h_kernel[kernel][channel][row][column] = kernel_template[row][column];
        }
      }
    }
  }

  float* d_kernel{ nullptr };
  hipMalloc(&d_kernel, sizeof(h_kernel));
  hipMemcpy(d_kernel, h_kernel, sizeof(h_kernel), hipMemcpyHostToDevice);
  // *************************************************************************

  const float alpha = 1.0f, beta = 0.0f;

  // the actual convolution operation !!! forward convolution
  checkCUDNN(cudnnConvolutionForward(cudnn,
      &alpha, input_descriptor, d_input, kernel_descriptor, d_kernel,
      convolution_descriptor, convolution_algorithm,
      d_workspace, // note: if we choose a convolution algorithm that needs no extra memory, d_workspace may be nullptr
      workspace_bytes,
      &beta, output_descriptor, d_output));

  if (with_sigmoid) {
    // describe the activation
    cudnnActivationDescriptor_t activation_descriptor;
    checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
    checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
        CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, /*relu_coef=*/0));
    // forward sigmoid activation
    checkCUDNN(cudnnActivationForward(cudnn, activation_descriptor,
        &alpha, output_descriptor, d_output,
        &beta, output_descriptor, d_output));
    cudnnDestroyActivationDescriptor(activation_descriptor);
  }

  float* h_output = new float[image_bytes];
  hipMemcpy(h_output, d_output, image_bytes, hipMemcpyDeviceToHost);

  save_image("./cudnn-out.png", h_output, height, width);

  delete[] h_output;
  hipFree(d_kernel);
  hipFree(d_input);
  hipFree(d_output);
  hipFree(d_workspace);

  // destroy the descriptors
  cudnnDestroyTensorDescriptor(input_descriptor);
  cudnnDestroyTensorDescriptor(output_descriptor);
  cudnnDestroyFilterDescriptor(kernel_descriptor);
  cudnnDestroyConvolutionDescriptor(convolution_descriptor);

  cudnnDestroy(cudnn);
}
f48948977da3daee2a4cbac4913f946a966926b1.cu
// Build directly: nvcc conv.cu -o conv -std=c++11 `pkg-config opencv --cflags --libs` -lcudnn
// or use the Makefile: just run make
#include "opencv2/opencv.hpp"
#include <cudnn.h>

// http://www.goldsborough.me/cuda/ml/cudnn/c++/2017/10/01/14-37-23-convolutions_with_cudnn/

#define checkCUDNN(expression)                               \
  {                                                          \
    cudnnStatus_t status = (expression);                     \
    if (status != CUDNN_STATUS_SUCCESS) {                    \
      std::cerr << "Error on line " << __LINE__ << ": "      \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                               \
    }                                                        \
  }

cv::Mat load_image(const char* image_path) {
  cv::Mat image = cv::imread(image_path, cv::IMREAD_COLOR); // or CV_LOAD_IMAGE_COLOR
  image.convertTo(image, CV_32FC3); // image.type()=CV_32FC3; assigning to image.type() is an error, it cannot be modified (it is constant)
  cv::normalize(image, image, 0, 1, cv::NORM_MINMAX); // scale to the range 0.0 ~ 1.0
  return image;
}

void save_image(const char* output_filename, float* buffer, int height, int width) {
  cv::Mat output_image(height, width, CV_32FC3, buffer);
  // Make negative values zero.
  cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO);
  cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX); // saturate_cast<uchar>
  output_image.convertTo(output_image, CV_8UC3);
  cv::imwrite(output_filename, output_image);
}

int main(int argc, const char* argv[]) {
  if (argc < 2) {
    std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
    std::exit(EXIT_FAILURE);
  }

  int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
  std::cerr << "GPU: " << gpu_id << std::endl;

  bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
  std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;

  cv::Mat image = load_image(argv[1]);

  cudaSetDevice(gpu_id); // select which GPU to use

  // create the cudnn handle
  cudnnHandle_t cudnn;
  cudnnCreate(&cudnn);

  // descriptor for the input tensor
  cudnnTensorDescriptor_t input_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
      /*format=*/CUDNN_TENSOR_NHWC, // note: NHWC; TensorFlow prefers NHWC tensors (the channel, i.e. BGR, varies fastest), while some other frameworks put the channel first
      /*dataType=*/CUDNN_DATA_FLOAT,
      /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));

  // descriptor for the convolution kernel (shape, format)
  cudnnFilterDescriptor_t kernel_descriptor;
  checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
  checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
      /*dataType=*/CUDNN_DATA_FLOAT,
      /*format=*/CUDNN_TENSOR_NCHW, // note: NCHW
      /*out_channels=*/3, /*in_channels=*/3,
      /*kernel_height=*/3, /*kernel_width=*/3));

  // descriptor for the convolution operation (stride, padding, etc.)
  cudnnConvolutionDescriptor_t convolution_descriptor;
  checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
  checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
      /*pad_height=*/1, /*pad_width=*/1,
      /*vertical_stride=*/1, /*horizontal_stride=*/1,
      /*dilation_height=*/1, /*dilation_width=*/1,
      /*mode=*/CUDNN_CROSS_CORRELATION, // CUDNN_CONVOLUTION
      /*computeType=*/CUDNN_DATA_FLOAT));

  // compute the dimensions of the image after the convolution
  int batch_size{ 0 }, channels{ 0 }, height{ 0 }, width{ 0 };
  checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
      input_descriptor, kernel_descriptor,
      &batch_size, &channels, &height, &width));

  std::cerr << "Output Image: " << height << " x " << width << " x " << channels << std::endl;

  // descriptor for the convolution output tensor
  cudnnTensorDescriptor_t output_descriptor;
  checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
  checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
      /*format=*/CUDNN_TENSOR_NHWC,
      /*dataType=*/CUDNN_DATA_FLOAT,
      /*batch_size=*/1, /*channels=*/3,
      /*image_height=*/image.rows, /*image_width=*/image.cols));

  // description of the convolution algorithm:
  // cudnn_tion_fwd_algo_gemm: models the convolution as an explicit matrix multiplication,
  // cudnn_tion_fwd_algo_fft: performs the convolution with a fast Fourier transform (FFT), or
  // cudnn_tion_fwd_algo_winograd: performs the convolution with the Winograd algorithm.
  cudnnConvolutionFwdAlgo_t convolution_algorithm;
  checkCUDNN(
      cudnnGetConvolutionForwardAlgorithm(cudnn,
          input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor,
          CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, // CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT (when memory is limited, set memoryLimitInBytes to a non-zero value)
          /*memoryLimitInBytes=*/0,
          &convolution_algorithm));

  // ask cuDNN how much memory the operation will need
  size_t workspace_bytes{ 0 };
  checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
      input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor,
      convolution_algorithm, &workspace_bytes));
  std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl;
  assert(workspace_bytes > 0);

  // *************************************************************************
  // allocate memory; the size comes from cudnnGetConvolutionForwardWorkspaceSize
  void* d_workspace{ nullptr };
  cudaMalloc(&d_workspace, workspace_bytes);

  // computed from cudnnGetConvolution2dForwardOutputDim
  int image_bytes = batch_size * channels * height * width * sizeof(float);

  float* d_input{ nullptr };
  cudaMalloc(&d_input, image_bytes);
  cudaMemcpy(d_input, image.ptr<float>(0), image_bytes, cudaMemcpyHostToDevice);

  float* d_output{ nullptr };
  cudaMalloc(&d_output, image_bytes);
  cudaMemset(d_output, 0, image_bytes);
  // *************************************************************************

  // clang-format off
  /*
  const float kernel_template[3][3] = {
    { 1, 1, 1 },
    { 1, -8, 1 },
    { 1, 1, 1 }
  };
  */
  const float kernel_template[3][3] = {
    { 0, 1, 0 },
    { 1, -4, 1 },
    { 0, 1, 0 }
  };
  // clang-format on

  float h_kernel[3][3][3][3]; // NCHW
  for (int kernel = 0; kernel < 3; ++kernel) {
    for (int channel = 0; channel < 3; ++channel) {
      for (int row = 0; row < 3; ++row) {
        for (int column = 0; column < 3; ++column) {
          h_kernel[kernel][channel][row][column] = kernel_template[row][column];
        }
      }
    }
  }

  float* d_kernel{ nullptr };
  cudaMalloc(&d_kernel, sizeof(h_kernel));
  cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice);
  // *************************************************************************

  const float alpha = 1.0f, beta = 0.0f;

  // the actual convolution operation !!! forward convolution
  checkCUDNN(cudnnConvolutionForward(cudnn,
      &alpha, input_descriptor, d_input, kernel_descriptor, d_kernel,
      convolution_descriptor, convolution_algorithm,
      d_workspace, // note: if we choose a convolution algorithm that needs no extra memory, d_workspace may be nullptr
      workspace_bytes,
      &beta, output_descriptor, d_output));

  if (with_sigmoid) {
    // describe the activation
    cudnnActivationDescriptor_t activation_descriptor;
    checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
    checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
        CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, /*relu_coef=*/0));
    // forward sigmoid activation
    checkCUDNN(cudnnActivationForward(cudnn, activation_descriptor,
        &alpha, output_descriptor, d_output,
        &beta, output_descriptor, d_output));
    cudnnDestroyActivationDescriptor(activation_descriptor);
  }

  float* h_output = new float[image_bytes];
  cudaMemcpy(h_output, d_output, image_bytes, cudaMemcpyDeviceToHost);

  save_image("./cudnn-out.png", h_output, height, width);

  delete[] h_output;
  cudaFree(d_kernel);
  cudaFree(d_input);
  cudaFree(d_output);
  cudaFree(d_workspace);

  // destroy the descriptors
  cudnnDestroyTensorDescriptor(input_descriptor);
  cudnnDestroyTensorDescriptor(output_descriptor);
  cudnnDestroyFilterDescriptor(kernel_descriptor);
  cudnnDestroyConvolutionDescriptor(convolution_descriptor);

  cudnnDestroy(cudnn);
}
825ecf651c57660c0a9b7fbe24de2db46f14c931.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

void __global__ kernel0(int64_t Npart, int64_t* totalNpairs, int64_t* npairs){
    int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < Npart) {
        for(int64_t j = 0; j < Npart; j++) {
            totalNpairs[(i*Npart)+j] += 7;
        }
    }
    __syncthreads();
    // the code below for reduction is incorrect and should be commented, while the commented code in the main function should be uncommented
    if(i==0){
        for(int64_t k=0; k<Npart; k++){
            for (int64_t j=0; j<Npart; j++){
                npairs[j] += totalNpairs[(k*Npart)+ j];
            }
        }
    }
}

void __global__ kernel1(int64_t *npairs, int64_t* totalNpairs, int64_t Npart){
    int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i==0){
        for(int64_t k=0; k<Npart; k++){
            for (int64_t j=0; j<Npart; j++){
                npairs[j] += totalNpairs[(k*Npart)+ j];
            }
        }
    }
}

int main(int argc, char **argv){
    int64_t *device_npairs;
    int64_t *device_totalNpairs;
    int64_t Npart = 10000;
    int64_t *npairs = (int64_t *) (calloc(Npart, sizeof((*npairs))));
    int64_t *totalNpairs = (int64_t *) (calloc((Npart*Npart), sizeof((*npairs))));

    int D_rows = (Npart > 1024) ? Npart/1024 : Npart;
    int D_cols = (Npart > 1024) ? 1024 : 1;
    if (Npart % 1024 && Npart > 1024){
        D_rows++;
    }
    printf("\nD_rows:%d, D_cols:%d\n", D_rows, D_cols);
    dim3 dimGrid(D_rows, 1);
    dim3 dimBlock(D_cols, 1);

    for (int64_t j=0; j<Npart; j++){
        npairs[j] = 5;
    }
    hipMalloc((void **) &device_npairs, (Npart)*sizeof(int64_t));
    hipMemcpy(device_npairs, npairs, (Npart)*sizeof(int64_t), hipMemcpyHostToDevice);

    for (int64_t j=0; j<Npart*Npart; j++){
        totalNpairs[j] = 0;
    }
    hipMalloc((void **) &device_totalNpairs, (Npart*Npart)*sizeof(int64_t));
    hipMemcpy(device_totalNpairs, totalNpairs, (Npart*Npart)*sizeof(int64_t), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( kernel0), dim3(dimGrid), dim3(dimBlock), 0, 0, Npart, device_totalNpairs, device_npairs);
    hipMemcpy(totalNpairs, device_totalNpairs, (Npart*Npart)*sizeof(int64_t), hipMemcpyDeviceToHost);

    hipLaunchKernelGGL(( kernel1), dim3(dimGrid), dim3(dimBlock), 0, 0, device_npairs, device_totalNpairs, Npart);
    hipMemcpy(npairs, device_npairs, (Npart)*sizeof(int64_t), hipMemcpyDeviceToHost);

    hipFree(device_npairs);
    hipFree(device_totalNpairs);

    //only printing the first 100 elements
    for (int i = 0; i < 100; i++) {
        fprintf(stdout, "%lu\t ", npairs[i]);
    }
    free(npairs);
    free(totalNpairs);
    return 0;
}
825ecf651c57660c0a9b7fbe24de2db46f14c931.cu
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>

void __global__ kernel0(int64_t Npart, int64_t* totalNpairs, int64_t* npairs){
    int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < Npart) {
        for(int64_t j = 0; j < Npart; j++) {
            totalNpairs[(i*Npart)+j] += 7;
        }
    }
    __syncthreads();
    // the code below for reduction is incorrect and should be commented, while the commented code in the main function should be uncommented
    if(i==0){
        for(int64_t k=0; k<Npart; k++){
            for (int64_t j=0; j<Npart; j++){
                npairs[j] += totalNpairs[(k*Npart)+ j];
            }
        }
    }
}

void __global__ kernel1(int64_t *npairs, int64_t* totalNpairs, int64_t Npart){
    int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i==0){
        for(int64_t k=0; k<Npart; k++){
            for (int64_t j=0; j<Npart; j++){
                npairs[j] += totalNpairs[(k*Npart)+ j];
            }
        }
    }
}

int main(int argc, char **argv){
    int64_t *device_npairs;
    int64_t *device_totalNpairs;
    int64_t Npart = 10000;
    int64_t *npairs = (int64_t *) (calloc(Npart, sizeof((*npairs))));
    int64_t *totalNpairs = (int64_t *) (calloc((Npart*Npart), sizeof((*npairs))));

    int D_rows = (Npart > 1024) ? Npart/1024 : Npart;
    int D_cols = (Npart > 1024) ? 1024 : 1;
    if (Npart % 1024 && Npart > 1024){
        D_rows++;
    }
    printf("\nD_rows:%d, D_cols:%d\n", D_rows, D_cols);
    dim3 dimGrid(D_rows, 1);
    dim3 dimBlock(D_cols, 1);

    for (int64_t j=0; j<Npart; j++){
        npairs[j] = 5;
    }
    cudaMalloc((void **) &device_npairs, (Npart)*sizeof(int64_t));
    cudaMemcpy(device_npairs, npairs, (Npart)*sizeof(int64_t), cudaMemcpyHostToDevice);

    for (int64_t j=0; j<Npart*Npart; j++){
        totalNpairs[j] = 0;
    }
    cudaMalloc((void **) &device_totalNpairs, (Npart*Npart)*sizeof(int64_t));
    cudaMemcpy(device_totalNpairs, totalNpairs, (Npart*Npart)*sizeof(int64_t), cudaMemcpyHostToDevice);

    kernel0<<<dimGrid,dimBlock>>>(Npart, device_totalNpairs, device_npairs);
    cudaMemcpy(totalNpairs, device_totalNpairs, (Npart*Npart)*sizeof(int64_t), cudaMemcpyDeviceToHost);

    kernel1<<<dimGrid,dimBlock>>>(device_npairs, device_totalNpairs, Npart);
    cudaMemcpy(npairs, device_npairs, (Npart)*sizeof(int64_t), cudaMemcpyDeviceToHost);

    cudaFree(device_npairs);
    cudaFree(device_totalNpairs);

    //only printing the first 100 elements
    for (int i = 0; i < 100; i++) {
        fprintf(stdout, "%lu\t ", npairs[i]);
    }
    free(npairs);
    free(totalNpairs);
    return 0;
}
0e68e250e1afc8fc196e64aab5f46213ebee042a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_BATCHED #include "ztrtri.cuh" #include "ztrtri_lower_device.cuh" /******************************************************************************/ __global__ void ztrtri_diag_lower_kernel_batched( magma_diag_t diag, int n, magmaDoubleComplex const * const * dA_array, int lda, magmaDoubleComplex **dinvA_array) { int batchid = blockIdx.z; ztrtri_diag_lower_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]); } /******************************************************************************/ __global__ void triple_zgemm16_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm16_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm16_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm16_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm32_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm32_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; 
triple_zgemm_above64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm_above64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part3_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm_above64_part3_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } // ============================================================================= // vbatched kernels /******************************************************************************/ __global__ void ztrtri_diag_lower_kernel_vbatched( magma_diag_t diag, magma_int_t* n, magmaDoubleComplex const * const * dA_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; if(blockIdx.x >= magma_ceildiv(my_n, IB)) return; ztrtri_diag_lower_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]); } // The kernels below have 3D grids // grid.x and grid.y are independent from my_n // only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y /******************************************************************************/ __global__ void triple_zgemm16_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm16_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm16_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm16_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm32_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm32_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm32_part2_lower_kernel_vbatched( 
magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm32_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm64_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm64_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm64_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm64_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm_above64_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm_above64_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part3_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm_above64_part3_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); }
0e68e250e1afc8fc196e64aab5f46213ebee042a.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by ztrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_BATCHED #include "ztrtri.cuh" #include "ztrtri_lower_device.cuh" /******************************************************************************/ __global__ void ztrtri_diag_lower_kernel_batched( magma_diag_t diag, int n, magmaDoubleComplex const * const * dA_array, int lda, magmaDoubleComplex **dinvA_array) { int batchid = blockIdx.z; ztrtri_diag_lower_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]); } /******************************************************************************/ __global__ void triple_zgemm16_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm16_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm16_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm16_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm32_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm32_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm32_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm64_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part1_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm_above64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); 
} /******************************************************************************/ __global__ void triple_zgemm_above64_part2_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm_above64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part3_lower_kernel_batched( int n, magmaDoubleComplex const * const * Ain_array, int lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_zgemm_above64_part3_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } // ============================================================================= // vbatched kernels /******************************************************************************/ __global__ void ztrtri_diag_lower_kernel_vbatched( magma_diag_t diag, magma_int_t* n, magmaDoubleComplex const * const * dA_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; if(blockIdx.x >= magma_ceildiv(my_n, IB)) return; ztrtri_diag_lower_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]); } // The kernels below have 3D grids // grid.x and grid.y are independent from my_n // only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y /******************************************************************************/ __global__ void triple_zgemm16_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm16_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm16_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm16_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm32_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm32_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm32_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex 
**dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm32_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm64_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm64_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm64_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm64_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part1_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm_above64_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part2_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm_above64_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_zgemm_above64_part3_lower_kernel_vbatched( magma_int_t* n, magmaDoubleComplex const * const * Ain_array, magma_int_t* lda, magmaDoubleComplex **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_zgemm_above64_part3_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); }
b9b579d6f15e04d4221fe19d700abf9b5e9078a5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#include <chrono>

int N;

using namespace std::chrono;

// Compares two arrays and prints an error if there is a difference.
void cmp_tab(float *t1, float *t2){
    for(int i=0; i<N; ++i)
        if(t1[i]!=t2[i]){
            printf("Error at index %d: %f - %f\n", i, t1[i], t2[i]);
            return;
        }
    printf("Tables are identical\n");
}

/**
 * Performs vector addition between <tab1> and <tab2> and stores the
 * result in <out>
 */
void add(float* tab1, float* tab2, float* out){
    for (int i = 0; i < N; ++i)
        out[i] = tab1[i] + tab2[i];
}

/**
 * Each block performs the addition of two elements of <f1>
 * and <f2> and stores the result in <f_out>
 */
__global__ void cuda_add_block(float *f1, float *f2, float* f_out){
    int tid = blockIdx.x;
    f_out[tid] = f1[tid] + f2[tid];
}

/**
 * Each thread performs the addition of two elements of <f1>
 * and <f2> and stores the result in <f_out>
 */
__global__ void cuda_add_thread(float *f1, float *f2, float* f_out){
    int tid = threadIdx.x;
    f_out[tid] = f1[tid] + f2[tid];
}

int main(int argc, char **argv)
{
    if (argc != 2) {
        printf("Usage: %s <N>\n", argv[0]);
        exit(-1);
    }

    //Retrieve the table size from args
    N = atoi(argv[1]);
    if(N > 1024 || N <= 0){
        printf("N must belong to ]0:1024]\n");
        exit(-1);
    }

    float *rnd_floats1 = (float*) malloc(N*sizeof(float)); //first random vector
    float *rnd_floats2 = (float*) malloc(N*sizeof(float)); //second random vector
    float *sum         = (float*) malloc(N*sizeof(float)); //add output for CPU
    float *sum_cuda    = (float*) malloc(N*sizeof(float)); //add output for GPU

    //Create table of random single-precision floats between 0 and 1
    srand((unsigned)time(NULL));
    for (int i = 0; i < N; ++i){
        rnd_floats1[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
        rnd_floats2[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
    }

    std::cout << "********************************************************" << std::endl;
    std::cout << "                      Run on CPU                        " << std::endl;
    std::cout << "********************************************************" << std::endl;

    auto start = high_resolution_clock::now();
    add(rnd_floats1, rnd_floats2, sum);
    auto stop = high_resolution_clock::now();
    duration<double> duration = stop - start;
    printf("\nTime to generate: %3.7f ms\n\n", duration.count() * 1000.0F);

    std::cout << "********************************************************" << std::endl;
    std::cout << "                 CUDA run on N blocks                   " << std::endl;
    std::cout << "********************************************************" << std::endl;

    float *dev_rnd_floats1, *dev_rnd_floats2, *dev_rnd_floats_out;
    hipMalloc((void**)&dev_rnd_floats1,    N * sizeof(float));
    hipMalloc((void**)&dev_rnd_floats2,    N * sizeof(float));
    hipMalloc((void**)&dev_rnd_floats_out, N * sizeof(float));
    hipMemcpy(dev_rnd_floats1, rnd_floats1, N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_rnd_floats2, rnd_floats2, N * sizeof(float), hipMemcpyHostToDevice);

    hipEvent_t cuda_start, cuda_stop;
    hipEventCreate( &cuda_start);
    hipEventCreate( &cuda_stop);
    hipEventRecord( cuda_start, 0 );

    hipLaunchKernelGGL(( cuda_add_block), dim3(N),dim3(1), 0, 0, dev_rnd_floats1, dev_rnd_floats2, dev_rnd_floats_out);
    hipMemcpy(sum_cuda, dev_rnd_floats_out, N * sizeof(float), hipMemcpyDeviceToHost);

    hipEventRecord( cuda_stop, 0 );
    hipEventSynchronize( cuda_stop );
    float elapsedTime;
    hipEventElapsedTime( &elapsedTime, cuda_start, cuda_stop );
    printf("\nParallel time to generate: %3.7f ms\n\n", elapsedTime);
    hipEventDestroy( cuda_start );
    hipEventDestroy( cuda_stop );
    hipFree( dev_rnd_floats_out );

    cmp_tab(sum, sum_cuda); //compare results between CPU and GPU

    std::cout << "********************************************************" << std::endl;
    std::cout << "                 CUDA run on N threads                  " << std::endl;
    std::cout << "********************************************************" << std::endl;

    hipMalloc((void**)&dev_rnd_floats_out, N * sizeof(float));

    hipEventCreate( &cuda_start);
    hipEventCreate( &cuda_stop);
    hipEventRecord( cuda_start, 0 );

    hipLaunchKernelGGL(( cuda_add_thread), dim3(1),dim3(N), 0, 0, dev_rnd_floats1, dev_rnd_floats2, dev_rnd_floats_out);
    hipMemcpy(sum_cuda, dev_rnd_floats_out, N * sizeof(float), hipMemcpyDeviceToHost);

    hipEventRecord( cuda_stop, 0 );
    hipEventSynchronize( cuda_stop );
    hipEventElapsedTime( &elapsedTime, cuda_start, cuda_stop );
    printf("\nParallel time to generate: %3.7f ms\n\n", elapsedTime);
    hipEventDestroy( cuda_start );
    hipEventDestroy( cuda_stop );
    hipFree( dev_rnd_floats1 );
    hipFree( dev_rnd_floats2 );
    hipFree( dev_rnd_floats_out );

    cmp_tab(sum, sum_cuda); //compare results between CPU and GPU

    return 0;
}
b9b579d6f15e04d4221fe19d700abf9b5e9078a5.cu
#include <math.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#include <chrono>

int N;

using namespace std::chrono;

// Compares two arrays and prints an error if there is a difference.
void cmp_tab(float *t1, float *t2){
    for(int i=0; i<N; ++i)
        if(t1[i]!=t2[i]){
            printf("Error at index %d: %f - %f\n", i, t1[i], t2[i]);
            return;
        }
    printf("Tables are identical\n");
}

/**
 * Performs vector addition between <tab1> and <tab2> and stores the
 * result in <out>
 */
void add(float* tab1, float* tab2, float* out){
    for (int i = 0; i < N; ++i)
        out[i] = tab1[i] + tab2[i];
}

/**
 * Each block performs the addition of two elements of <f1>
 * and <f2> and stores the result in <f_out>
 */
__global__ void cuda_add_block(float *f1, float *f2, float* f_out){
    int tid = blockIdx.x;
    f_out[tid] = f1[tid] + f2[tid];
}

/**
 * Each thread performs the addition of two elements of <f1>
 * and <f2> and stores the result in <f_out>
 */
__global__ void cuda_add_thread(float *f1, float *f2, float* f_out){
    int tid = threadIdx.x;
    f_out[tid] = f1[tid] + f2[tid];
}

int main(int argc, char **argv)
{
    if (argc != 2) {
        printf("Usage: %s <N>\n", argv[0]);
        exit(-1);
    }

    //Retrieve the table size from args
    N = atoi(argv[1]);
    if(N > 1024 || N <= 0){
        printf("N must belong to ]0:1024]\n");
        exit(-1);
    }

    float *rnd_floats1 = (float*) malloc(N*sizeof(float)); //first random vector
    float *rnd_floats2 = (float*) malloc(N*sizeof(float)); //second random vector
    float *sum         = (float*) malloc(N*sizeof(float)); //add output for CPU
    float *sum_cuda    = (float*) malloc(N*sizeof(float)); //add output for GPU

    //Create table of random single-precision floats between 0 and 1
    srand((unsigned)time(NULL));
    for (int i = 0; i < N; ++i){
        rnd_floats1[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
        rnd_floats2[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
    }

    std::cout << "********************************************************" << std::endl;
    std::cout << "                      Run on CPU                        " << std::endl;
    std::cout << "********************************************************" << std::endl;

    auto start = high_resolution_clock::now();
    add(rnd_floats1, rnd_floats2, sum);
    auto stop = high_resolution_clock::now();
    duration<double> duration = stop - start;
    printf("\nTime to generate: %3.7f ms\n\n", duration.count() * 1000.0F);

    std::cout << "********************************************************" << std::endl;
    std::cout << "                 CUDA run on N blocks                   " << std::endl;
    std::cout << "********************************************************" << std::endl;

    float *dev_rnd_floats1, *dev_rnd_floats2, *dev_rnd_floats_out;
    cudaMalloc((void**)&dev_rnd_floats1,    N * sizeof(float));
    cudaMalloc((void**)&dev_rnd_floats2,    N * sizeof(float));
    cudaMalloc((void**)&dev_rnd_floats_out, N * sizeof(float));
    cudaMemcpy(dev_rnd_floats1, rnd_floats1, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_rnd_floats2, rnd_floats2, N * sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t cuda_start, cuda_stop;
    cudaEventCreate( &cuda_start);
    cudaEventCreate( &cuda_stop);
    cudaEventRecord( cuda_start, 0 );

    cuda_add_block<<<N,1>>>(dev_rnd_floats1, dev_rnd_floats2, dev_rnd_floats_out);
    cudaMemcpy(sum_cuda, dev_rnd_floats_out, N * sizeof(float), cudaMemcpyDeviceToHost);

    cudaEventRecord( cuda_stop, 0 );
    cudaEventSynchronize( cuda_stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, cuda_start, cuda_stop );
    printf("\nParallel time to generate: %3.7f ms\n\n", elapsedTime);
    cudaEventDestroy( cuda_start );
    cudaEventDestroy( cuda_stop );
    cudaFree( dev_rnd_floats_out );

    cmp_tab(sum, sum_cuda); //compare results between CPU and GPU

    std::cout << "********************************************************" << std::endl;
    std::cout << "                 CUDA run on N threads                  " << std::endl;
    std::cout << "********************************************************" << std::endl;

    cudaMalloc((void**)&dev_rnd_floats_out, N * sizeof(float));

    cudaEventCreate( &cuda_start);
    cudaEventCreate( &cuda_stop);
    cudaEventRecord( cuda_start, 0 );

    cuda_add_thread<<<1,N>>>(dev_rnd_floats1, dev_rnd_floats2, dev_rnd_floats_out);
    cudaMemcpy(sum_cuda, dev_rnd_floats_out, N * sizeof(float), cudaMemcpyDeviceToHost);

    cudaEventRecord( cuda_stop, 0 );
    cudaEventSynchronize( cuda_stop );
    cudaEventElapsedTime( &elapsedTime, cuda_start, cuda_stop );
    printf("\nParallel time to generate: %3.7f ms\n\n", elapsedTime);
    cudaEventDestroy( cuda_start );
    cudaEventDestroy( cuda_stop );
    cudaFree( dev_rnd_floats1 );
    cudaFree( dev_rnd_floats2 );
    cudaFree( dev_rnd_floats_out );

    cmp_tab(sum, sum_cuda); //compare results between CPU and GPU

    return 0;
}
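The 1024-element cap on N in the two files above comes from their launch configurations: cuda_add_block uses N blocks of a single thread and cuda_add_thread uses a single block of N threads, and a block typically cannot hold more than 1024 threads. The following is a minimal sketch, not part of the original files, of the usual grid-of-blocks alternative; the kernel name cuda_add_grid and the launch parameters are illustrative assumptions.

// Hypothetical grid-of-blocks variant (not in the original files):
// one thread per element, so N is no longer limited by the block size.
__global__ void cuda_add_grid(const float *f1, const float *f2, float *f_out, int n){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)                        // guard the last, partially filled block
        f_out[tid] = f1[tid] + f2[tid];
}

// Example launch: 256 threads per block, enough blocks to cover N elements.
// int threads = 256;
// int blocks  = (N + threads - 1) / threads;
// cuda_add_grid<<<blocks, threads>>>(dev_rnd_floats1, dev_rnd_floats2, dev_rnd_floats_out, N);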
ce3a90a5a489f96b132710eb864946213e496638.hip
// !!! This is a file automatically generated by hipify!!!
/*
PLR - Parallelized Linear Recurrences [float]

Copyright (c) 2018 Texas State University. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted for academic, research, experimental, or personal use provided
that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright notice,
     this list of conditions and the following disclaimer in the documentation
     and/or other materials provided with the distribution.

   * Neither the name of Texas State University nor the names of its
     contributors may be used to endorse or promote products derived from this
     software without specific prior written permission.

For all other uses, please contact the Office for Commercialization and
Industry Relations at Texas State University http://www.txstate.edu/ocir/.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Sepideh Maleki and Martin Burtscher non-recursive coefficients: (0.040353) recursive coefficients: (2.132330, -1.573120, 0.400436) */ #include <cstdio> #include <cassert> #include <hip/hip_runtime.h> typedef float T; static const int device = 0; static const int order = 3; static const int warp_size = 32; static const int block_size = 1024; static __device__ const T facA[300] = {4.004360e-01f, 8.538617e-01f, 1.190781e+00f, 1.356260e+00f, 1.360669e+00f, 1.244668e+00f, 1.056642e+00f, 8.399586e-01f, 6.272539e-01f, 4.392741e-01f, 2.862814e-01f, 1.705905e-01f, 8.930148e-02f, 3.669824e-02f, 6.081402e-03f, -9.003654e-03f, -1.407024e-02f, -1.340335e-02f, -1.005157e-02f, -5.982426e-03f, -2.311360e-03f, 4.574805e-04f, 2.215967e-03f, 3.079950e-03f, 3.264679e-03f, 3.003595e-03f, 2.502247e-03f, 1.917895e-03f, 1.355999e-03f, 8.763468e-04f, 5.035064e-04f, 2.380338e-04f, 6.641139e-05f, -3.122257e-05f, -7.573263e-05f, -8.577658e-05f, -7.627012e-05f, -5.802227e-05f, -3.808860e-05f, -2.048277e-05f, -6.992295e-06f, 2.059927e-06f, 7.190126e-06f, 9.291241e-06f, 9.325930e-06f, 8.148910e-06f, 6.425903e-06f, 4.617371e-06f, 3.000159e-06f, 1.706813e-06f, 7.688400e-07f, 1.557706e-07f, -1.938537e-07f, -3.505349e-07f, -3.801246e-07f, -3.367437e-07f, -2.604339e-07f, -1.778083e-07f, -1.042964e-07f, -4.696778e-08f, -7.280846e-09f, 1.659675e-08f, 2.803580e-08f, 3.075738e-08f, 2.812715e-08f, 2.281785e-08f, 1.672417e-08f, 1.102934e-08f, 6.346173e-09f, 2.878612e-09f, 5.714038e-10f, -7.687435e-10f, -1.385402e-09f, -1.515997e-09f, -1.361036e-09f, -1.072097e-09f, -7.520506e-10f, -4.620915e-10f, -2.315717e-10f, -6.801004e-11f, 3.423213e-11f, 8.725253e-11f, 1.049663e-10f, 1.002718e-10f, 8.362706e-11f, 6.261321e-11f, 4.210906e-11f, 2.477960e-11f, 1.166827e-11f, 2.761305e-12f, -2.544936e-12f, -5.098109e-12f, -5.761636e-12f, -5.284855e-12f, -4.246777e-12f, -3.048985e-12f, -1.936999e-12f, -1.034463e-12f, -3.796089e-13f, 4.223916e-14f, 2.730019e-13f, 3.636737e-13f, 3.629217e-13f, 3.110862e-13f, 2.380470e-13f, 1.635459e-13f, 9.882725e-14f, 4.877768e-14f, 1.403263e-14f, -7.236951e-15f, -1.797424e-14f, -2.132325e-14f, -2.009050e-14f, -1.649308e-14f, -1.210252e-14f, -7.905927e-15f, -4.423757e-15f, -1.842221e-15f, -1.349404e-16f, 8.388659e-16f, 1.263324e-15f, 1.320153e-15f, 1.163553e-15f, 9.101997e-16f, 6.390748e-16f, 3.967934e-16f, 2.052298e-16f, 6.932263e-17f, -1.614205e-17f, -6.129159e-17f, -7.754122e-17f, -7.538830e-17f, -6.331444e-17f, -4.746274e-17f, -3.179320e-17f, -1.848239e-17f, -8.401827e-18f, -1.571592e-18f, 2.464915e-18f, 4.363920e-18f, 4.798388e-18f, 4.353818e-18f, 3.482806e-18f, 2.498861e-18f, 1.592950e-18f, 8.603277e-19f, 3.292348e-19f, -1.348682e-20f, -2.021780e-19f, -2.780564e-19f, -2.802583e-19f, -2.411463e-19f, -1.846674e-19f, -1.266453e-19f, -7.610917e-20f, -3.700914e-20f, -9.900163e-21f, 6.632551e-21f, 1.489714e-20f, 1.736743e-20f, 1.625402e-20f, 1.330323e-20f, 9.751900e-21f, 6.375383e-21f, 3.580604e-21f, 1.510800e-21f, 1.417361e-22f, -6.406382e-22f, -9.840412e-22f, -1.033744e-21f, -9.128021e-22f, -7.142379e-22f, -5.009920e-22f, -3.102170e-22f, -1.593711e-22f, -5.243851e-23f, 1.467165e-23f, 4.995894e-23f, 6.245041e-23f, 6.044852e-23f, 5.065956e-23f, 3.793751e-23f, 2.540749e-23f, 1.478281e-23f, 6.744335e-24f, 1.300093e-24f, -1.917852e-24f, -3.434022e-24f, -3.784852e-24f, -3.436402e-24f, -2.748623e-24f, -1.970689e-24f, -1.254305e-24f, -6.751096e-25f, -2.555189e-25f, 1.490903e-26f, 1.634145e-25f, 2.226810e-25f, 2.237289e-25f, 1.921969e-25f, 1.470444e-25f, 1.007874e-25f, 6.055608e-26f, 2.945677e-26f, 7.908466e-27f, 
-5.226737e-27f, -1.179054e-26f, -1.375221e-26f, -1.286928e-26f, -1.052904e-26f, -7.713337e-27f, -5.037268e-27f, -2.823317e-27f, -1.184715e-27f, -1.018900e-28f, 5.158778e-28f, 7.859042e-28f, 8.234690e-28f, 7.261621e-28f, 5.677060e-28f, 3.979431e-28f, 2.462577e-28f, 1.264204e-28f, 4.152785e-29f, -1.171296e-29f, -3.968091e-29f, -4.955764e-29f, -4.794071e-29f, -4.015496e-29f, -3.005179e-29f, -2.010896e-29f, -1.168335e-29f, -5.312771e-30f, -1.001623e-30f, 1.543403e-30f, 2.739292e-30f, 3.012031e-30f, 2.731442e-30f, 2.182961e-30f, 1.564033e-30f, 9.947427e-31f, 5.348439e-31f, 2.019093e-31f, -1.250566e-32f, -1.301229e-31f, -1.769404e-31f, -1.776040e-31f, -1.524678e-31f, -1.165725e-31f, -7.983995e-32f, -4.791618e-32f, -2.325511e-32f, -6.180455e-33f, 4.216945e-33f, 9.402333e-33f, 1.094024e-32f, 1.022582e-32f, 8.359541e-33f, 6.119728e-33f, 3.993504e-33f, 2.235863e-33f, 9.358959e-34f, 7.750108e-35f, -4.116986e-34f, -6.250294e-34f, -6.540833e-34f, -5.763340e-34f, -4.502671e-34f, -3.153939e-34f, -1.949846e-34f, -9.992214e-35f, -3.262791e-35f, 9.537197e-36f, 3.165165e-35f, 3.942321e-35f, 3.809049e-35f, 3.187851e-35f, 2.384105e-35f, 1.594109e-35f, 9.252121e-36f, 4.198147e-36f, 7.805235e-37f, -1.234973e-36f, -2.180138e-36f, -2.393464e-36f, -2.168563e-36f, -1.731891e-36f, -1.239984e-36f, -7.879521e-37f, -4.230421e-37f, -1.590565e-37f, 0.000000e+00f, 1.038658e-37f, 1.407775e-37f, 1.411198e-37f, 1.210456e-37f, 9.248325e-38f, 6.329497e-38f, 3.794953e-38f, 1.838396e-38f}; static __device__ const T facB[307] = {-1.573120e+00f, -2.953975e+00f, -3.824143e+00f, -4.137311e+00f, -3.989155e+00f, -3.529032e+00f, -2.906369e+00f, -2.243149e+00f, -1.624217e+00f, -1.098439e+00f, -6.853842e-01f, -3.838832e-01f, -1.802289e-01f, -5.486554e-02f, 1.280957e-02f, 4.145417e-02f, 4.627284e-02f, 3.858599e-02f, 2.608508e-02f, 1.345091e-02f, 3.098036e-03f, -4.108451e-03f, -8.247929e-03f, -9.883654e-03f, -9.745402e-03f, -8.535006e-03f, -6.826533e-03f, -5.032243e-03f, -3.409170e-03f, -2.086742e-03f, -1.101681e-03f, -4.316070e-04f, -2.285797e-05f, 1.890759e-04f, 2.662995e-04f, 2.612462e-04f, 2.138548e-04f, 1.516733e-04f, 9.161064e-05f, 4.237898e-05f, 6.986898e-06f, -1.508465e-05f, -2.618662e-05f, -2.931073e-05f, -2.734590e-05f, -2.268726e-05f, -1.709540e-05f, -1.171354e-05f, -7.168809e-06f, -3.705080e-06f, -1.313579e-06f, 1.569026e-07f, 9.173381e-07f, 1.183236e-06f, 1.142797e-06f, 9.427829e-07f, 6.863776e-07f, 4.380899e-07f, 2.319221e-07f, 8.021678e-08f, -1.836572e-08f, -7.248244e-08f, -9.354329e-08f, -9.279589e-08f, -7.974120e-08f, -6.151359e-08f, -4.288362e-08f, -2.660501e-08f, -1.390185e-08f, -4.962594e-09f, 6.337801e-10f, 3.591385e-09f, 4.673803e-09f, 4.570199e-09f, 3.830839e-09f, 2.850701e-09f, 1.882337e-09f, 1.063276e-09f, 4.476357e-10f, 3.560213e-11f, -2.024952e-10f, -3.085435e-10f, -3.251110e-10f, -2.889544e-10f, -2.282593e-10f, -1.623504e-10f, -1.028131e-10f, -5.523802e-11f, -2.105931e-11f, 8.205728e-13f, 1.275926e-11f, 1.748318e-11f, 1.753665e-11f, 1.500006e-11f, 1.139870e-11f, 7.731212e-12f, 4.560532e-12f, 2.126885e-12f, 4.568121e-13f, -5.455694e-13f, -1.030273e-12f, -1.155712e-12f, -1.062082e-12f, -8.591936e-13f, -6.240907e-13f, -4.044484e-13f, -2.247000e-13f, -9.279703e-14f, -6.349494e-15f, 4.246365e-14f, 6.337576e-14f, 6.579505e-14f, 5.760304e-14f, 4.470313e-14f, 3.105203e-14f, 1.895612e-14f, 9.472874e-15f, 2.813390e-15f, -1.312176e-15f, -3.430513e-15f, -4.124192e-15f, -3.922972e-15f, -3.250923e-15f, -2.412210e-15f, -1.600436e-15f, -9.197477e-16f, -4.094635e-16f, -6.711000e-17f, 1.327344e-16f, 2.246417e-16f, 2.433299e-16f, 
2.186228e-16f, 1.733436e-16f, 1.231438e-16f, 7.743747e-17f, 4.081522e-17f, 1.452430e-17f, -2.227892e-18f, -1.125518e-17f, -1.467896e-17f, -1.448677e-17f, -1.230578e-17f, -9.328549e-18f, -6.334096e-18f, -3.759134e-18f, -1.786908e-18f, -4.331085e-19f, 3.821979e-19f, 7.807613e-19f, 8.901652e-19f, 8.229406e-19f, 6.670891e-19f, 4.843241e-19f, 3.128625e-19f, 1.723527e-19f, 6.928337e-20f, 1.884938e-21f, -3.595551e-20f, -5.189069e-20f, -5.333094e-20f, -4.648677e-20f, -3.600806e-20f, -2.500742e-20f, -1.529405e-20f, -7.691217e-21f, -2.354704e-21f, 9.539129e-22f, 2.658448e-21f, 3.225160e-21f, 3.077030e-21f, 2.552217e-21f, 1.893101e-21f, 1.253928e-21f, 7.177111e-22f, 3.158841e-22f, 4.664129e-23f, -1.100716e-22f, -1.815900e-22f, -1.953771e-22f, -1.750222e-22f, -1.385686e-22f, -9.837913e-23f, -6.187691e-23f, -3.266768e-23f, -1.171301e-23f, 1.636430e-24f, 8.834065e-24f, 1.157253e-23f, 1.143469e-23f, 9.715038e-24f, 7.361581e-24f, 4.993261e-24f, 2.956881e-24f, 1.397890e-24f, 3.287158e-25f, -3.140768e-25f, -6.270591e-25f, -7.113869e-25f, -6.562400e-25f, -5.313204e-25f, -3.854710e-25f, -2.489028e-25f, -1.371105e-25f, -5.516745e-26f, -1.613523e-27f, 2.844046e-26f, 4.109167e-26f, 4.223464e-26f, 3.680464e-26f, 2.849406e-26f, 1.977289e-26f, 1.207565e-26f, 6.054199e-27f, 1.830872e-27f, -7.844301e-28f, -2.128526e-27f, -2.571570e-27f, -2.449123e-27f, -2.029288e-27f, -1.504097e-27f, -9.956340e-28f, -5.694955e-28f, -2.503949e-28f, -3.672758e-29f, 8.753944e-29f, 1.441727e-28f, 1.550067e-28f, 1.387783e-28f, 1.098091e-28f, 7.790441e-29f, 4.894691e-29f, 2.578950e-29f, 9.188076e-30f, -1.377857e-30f, -7.064948e-30f, -9.218029e-30f, -9.093611e-30f, -7.718574e-30f, -5.844435e-30f, -3.961432e-30f, -2.343876e-30f, -1.106432e-30f, -2.583804e-31f, 2.510263e-31f, 4.986788e-31f, 5.649886e-31f, 5.207803e-31f, 4.213696e-31f, 3.054908e-31f, 1.970814e-31f, 1.084005e-31f, 4.344237e-32f, 1.024996e-33f, -2.274697e-32f, -3.272060e-32f, -3.357696e-32f, -2.923243e-32f, -2.261512e-32f, -1.568220e-32f, -9.569041e-33f, -4.790280e-33f, -1.440925e-33f, 6.313694e-34f, 1.694835e-33f, 2.043729e-33f, 1.944549e-33f, 1.610063e-33f, 1.192558e-33f, 7.887731e-34f, 4.506149e-34f, 1.975681e-34f, 2.826219e-35f, -7.009156e-35f, -1.148048e-34f, -1.232220e-34f, -1.102155e-34f, -8.714478e-35f, -6.178172e-35f, -3.878409e-35f, -2.040632e-35f, -7.240612e-36f, 1.131673e-36f, 5.632026e-36f, 7.329679e-36f, 7.222605e-36f, 6.125777e-36f, 4.635221e-36f, 3.139429e-36f, 1.855523e-36f, 8.739973e-37f, 2.018311e-37f, -2.015140e-37f, -3.972188e-37f, -4.491755e-37f, -4.136110e-37f, -3.344088e-37f, -2.422763e-37f, -1.561725e-37f, -8.579103e-38f, -3.427281e-38f, 0.000000e+00f, 1.815737e-38f, 2.602928e-38f, 2.667561e-38f, 2.320490e-38f, 1.793961e-38f, 1.243096e-38f}; static __device__ const T facC[306] = {2.132330e+00f, 2.973711e+00f, 3.386958e+00f, 3.397969e+00f, 3.108282e+00f, 2.638729e+00f, 2.097609e+00f, 1.566426e+00f, 1.096988e+00f, 7.149222e-01f, 4.260101e-01f, 2.230090e-01f, 9.164453e-02f, 1.518613e-02f, -2.248514e-02f, -3.513759e-02f, -3.347203e-02f, -2.510162e-02f, -1.493979e-02f, -5.772089e-03f, 1.142484e-03f, 5.533914e-03f, 7.691513e-03f, 8.152826e-03f, 7.500819e-03f, 6.248807e-03f, 4.789514e-03f, 3.386298e-03f, 2.188473e-03f, 1.257386e-03f, 5.944275e-04f, 1.658399e-04f, -7.797778e-05f, -1.891303e-04f, -2.142115e-04f, -1.904701e-04f, -1.448993e-04f, -9.511872e-05f, -5.115166e-05f, -1.746192e-05f, 5.144146e-06f, 1.795575e-05f, 2.320285e-05f, 2.328948e-05f, 2.035011e-05f, 1.604727e-05f, 1.153085e-05f, 7.492218e-06f, 4.262369e-06f, 1.919989e-06f, 3.889861e-07f, 
-4.841206e-07f, -8.753940e-07f, -9.492850e-07f, -8.409482e-07f, -6.503792e-07f, -4.440385e-07f, -2.604581e-07f, -1.172918e-07f, -1.818209e-08f, 4.144710e-08f, 7.001364e-08f, 7.681015e-08f, 7.024165e-08f, 5.698277e-08f, 4.176507e-08f, 2.754347e-08f, 1.584824e-08f, 7.188746e-09f, 1.426986e-09f, -1.919747e-09f, -3.459722e-09f, -3.785860e-09f, -3.398880e-09f, -2.677319e-09f, -1.878075e-09f, -1.153967e-09f, -5.782951e-10f, -1.698363e-10f, 8.549059e-11f, 2.178968e-10f, 2.621324e-10f, 2.504084e-10f, 2.088414e-10f, 1.563635e-10f, 1.051586e-10f, 6.188185e-11f, 2.913903e-11f, 6.895734e-12f, -6.355489e-12f, -1.273150e-11f, -1.438851e-11f, -1.319784e-11f, -1.060546e-11f, -7.614215e-12f, -4.837256e-12f, -2.583358e-12f, -9.479934e-13f, 1.054855e-13f, 6.817679e-13f, 9.082020e-13f, 9.063237e-13f, 7.768749e-13f, 5.944743e-13f, 4.084228e-13f, 2.468013e-13f, 1.218128e-13f, 3.504411e-14f, -1.807236e-14f, -4.488659e-14f, -5.325012e-14f, -5.017165e-14f, -4.118789e-14f, -3.022341e-14f, -1.974333e-14f, -1.104735e-14f, -4.600510e-15f, -3.369395e-16f, 2.094932e-15f, 3.154922e-15f, 3.296833e-15f, 2.905751e-15f, 2.273050e-15f, 1.595968e-15f, 9.909176e-16f, 5.125250e-16f, 1.731233e-16f, -4.030834e-17f, -1.530609e-16f, -1.936417e-16f, -1.882657e-16f, -1.581142e-16f, -1.185281e-16f, -7.939688e-17f, -4.615600e-17f, -2.098193e-17f, -3.924826e-18f, 6.155537e-18f, 1.089794e-17f, 1.198296e-17f, 1.087276e-17f, 8.697606e-18f, 6.240416e-18f, 3.978091e-18f, 2.148516e-18f, 8.222165e-19f, -3.366491e-20f, -5.048869e-19f, -6.943814e-19f, -6.998834e-19f, -6.022118e-19f, -4.611691e-19f, -3.162718e-19f, -1.900688e-19f, -9.242456e-20f, -2.472529e-20f, 1.656206e-20f, 3.720150e-20f, 4.337087e-20f, 4.059063e-20f, 3.322184e-20f, 2.435326e-20f, 1.592120e-20f, 8.941867e-21f, 3.772978e-21f, 3.540264e-22f, -1.599801e-21f, -2.457393e-21f, -2.581529e-21f, -2.279515e-21f, -1.783653e-21f, -1.251122e-21f, -7.747060e-22f, -3.980020e-22f, -1.309605e-22f, 3.663374e-23f, 1.247575e-22f, 1.559535e-22f, 1.509554e-22f, 1.265105e-22f, 9.474053e-23f, 6.344987e-23f, 3.691719e-23f, 1.684289e-23f, 3.247049e-24f, -4.789137e-24f, -8.575518e-24f, -9.451713e-24f, -8.581595e-24f, -6.864059e-24f, -4.921364e-24f, -3.132364e-24f, -1.685954e-24f, -6.381163e-25f, 3.722132e-26f, 4.080850e-25f, 5.560936e-25f, 5.587130e-25f, 4.799704e-25f, 3.672125e-25f, 2.516961e-25f, 1.512271e-25f, 7.356310e-26f, 1.975059e-26f, -1.305203e-26f, -2.944397e-26f, -3.434300e-26f, -3.213821e-26f, -2.629404e-26f, -1.926248e-26f, -1.257958e-26f, -7.050697e-27f, -2.958621e-27f, -2.544781e-28f, 1.288281e-27f, 1.962626e-27f, 2.056444e-27f, 1.813445e-27f, 1.417736e-27f, 9.937881e-28f, 6.149845e-28f, 3.157142e-28f, 1.037110e-28f, -2.924826e-29f, -9.909349e-29f, -1.237594e-28f, -1.197219e-28f, -1.002788e-28f, -7.504839e-29f, -5.021822e-29f, -2.917697e-29f, -1.326770e-29f, -2.501428e-30f, 3.854305e-30f, 6.840831e-30f, 7.521963e-30f, 6.821262e-30f, 5.451546e-30f, 3.905896e-30f, 2.484202e-30f, 1.335691e-30f, 5.042475e-31f, -3.121662e-32f, -3.249470e-31f, -4.418680e-31f, -4.435279e-31f, -3.807570e-31f, -2.911168e-31f, -1.993852e-31f, -1.196621e-31f, -5.807598e-32f, -1.543526e-32f, 1.053038e-32f, 2.348006e-32f, 2.732085e-32f, 2.553685e-32f, 2.087628e-32f, 1.528284e-32f, 9.973037e-33f, 5.583677e-33f, 2.337258e-33f, 1.935753e-34f, -1.028116e-33f, -1.560877e-33f, -1.633441e-33f, -1.439283e-33f, -1.124459e-33f, -7.876404e-34f, -4.869416e-34f, -2.495410e-34f, -8.148575e-35f, 2.381446e-35f, 7.904196e-35f, 9.845071e-35f, 9.512305e-35f, 7.961020e-35f, 5.953846e-35f, 3.980992e-35f, 2.310555e-35f, 1.048421e-35f, 
1.949313e-36f, -3.084043e-36f, -5.444446e-36f, -5.977210e-36f, -5.415579e-36f, -4.325085e-36f, -3.096643e-36f, -1.967779e-36f, -1.056483e-36f, -3.972256e-37f, 2.698968e-38f, 2.593803e-37f, 3.515631e-37f, 3.524196e-37f, 3.022893e-37f, 2.309607e-37f, 1.580685e-37f, 9.477289e-38f, 4.591126e-38f, 1.210517e-38f, 0.000000e+00f, -1.870049e-38f, -2.171775e-38f, -2.027947e-38f, -1.656624e-38f, -1.211922e-38f}; // shared memory size is 15760 bytes static __device__ unsigned int counter = 0; static __global__ __launch_bounds__(block_size, 2) void Recurrence1(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 1; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; } val0 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 
1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; } } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val0; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; } else { output[offs + (0 * block_size)] = val0; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence2(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 2; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane 
= tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; 
const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val1; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = 
__shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence3(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 3; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if 
(cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 
+= helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val2; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < 
items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence4(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 4; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 
+= spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta 
+ 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * 
delta)] = val2; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val3; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence5(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 5; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = 
input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if 
(cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * 
delta] = val4; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + 
(31 * order + 4 * delta)] = val4; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val4; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence6(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 6; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) 
val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 
5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += 
helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; 
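// remaining order-2 and order-3 factor terms of this warp-group carry step:
// warps 8-15 of each 16-warp group fold in the partials that warp 7 stored in
// spartc (index cwarp), then re-publish their updated partials for the next step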
val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val5; 
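// the last 'order' threads of the block just wrote this chunk's final carry
// values; the fence below orders those global writes before status[chunk_id]
// is set to 2 (fully done), so later chunks that spin on status never read
// stale carry data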
__threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence7(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 7; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5, val6; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; val6 = 0; if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; val6 = input[offs + (6 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; val6 *= 4.035270e-02f; const 
T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 2); if (cond) val6 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 4); if (cond) val6 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 4); if (cond) val6 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 8); if (cond) val6 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 2, 8); if (cond) val6 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 3, 8); if (cond) val6 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 5, 16); if (cond) val6 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if 
(cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 6, 16); if (cond) val6 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 7, 16); if (cond) val6 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 13, 32); if (cond) val6 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 14, 32); if (cond) val6 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 15, 32); if (cond) val6 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += 
helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; 
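// same warp-group carry step as in the smaller kernels, now over seven values
// per thread: warps with bit 3 set read warp 7's stored partials (cwarp) and
// scale them by the three order factors helpA/helpB/helpC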
val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; spartc[clane + (15 * order + 6 * delta)] = val6; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; spartc[clane + (31 * order + 6 * delta)] = val6; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + 
(4 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val6 += sfacB[tid] * spartc[31 * order + (5 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; val6 += sfacC[tid] * spartc[31 * order + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val6; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; output[offs + (6 * block_size)] = val6; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence8(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 8; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5, val6, val7; if (chunk_id == 
gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; val6 = 0; if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)]; val7 = 0; if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; val6 = input[offs + (6 * block_size)]; val7 = input[offs + (7 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; val6 *= 4.035270e-02f; val7 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 2); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 2); if (cond) val7 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 4); if (cond) val7 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 4); if (cond) val7 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 8); if (cond) val7 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = 
help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 2, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 2, 8); if (cond) val7 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 3, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 3, 8); if (cond) val7 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 5, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 5, 16); if (cond) val7 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 6, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 6, 16); if (cond) val7 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 7, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 7, 16); if (cond) val7 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 13, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 13, 32); if (cond) val7 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 14, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 14, 32); if (cond) val7 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * 
__shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 15, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 15, 32); if (cond) val7 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC 
* spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; 
val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; spartc[clane + (15 * order + 6 * delta)] = val6; spartc[clane + (15 * order + 7 * delta)] = val7; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; spartc[clane + (31 * order + 6 * delta)] = val6; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val7 += sfacB[tid] * spartc[31 * order + (6 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; val7 += sfacC[tid] * spartc[31 * order + (6 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val6 += sfacB[tid] * spartc[31 * order + (5 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; val6 += sfacC[tid] * spartc[31 * order + (5 * delta + 2)]; } if 
((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val7; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6; if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; output[offs + (6 * block_size)] = val6; output[offs + (7 * block_size)] = val7; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence9(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 9; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5, val6, val7, val8; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs 
+ (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; val6 = 0; if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)]; val7 = 0; if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)]; val8 = 0; if (offs + (8 * block_size) < items) val8 = input[offs + (8 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; val6 = input[offs + (6 * block_size)]; val7 = input[offs + (7 * block_size)]; val8 = input[offs + (8 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; val6 *= 4.035270e-02f; val7 *= 4.035270e-02f; val8 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 2); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 2); if (cond) val7 += spc; spc = help * __shfl(val8, 0, 2); if (cond) val8 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 4); if (cond) val7 += spc; spc = help * __shfl(val8, 0, 4); if (cond) val8 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 4); if (cond) val7 += spc; spc = help * __shfl(val8, 1, 4); if (cond) val8 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 8); if (cond) val7 += spc; spc = help * __shfl(val8, 1, 8); if (cond) val8 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc 
= help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 2, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 2, 8); if (cond) val7 += spc; spc = help * __shfl(val8, 2, 8); if (cond) val8 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 3, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 3, 8); if (cond) val7 += spc; spc = help * __shfl(val8, 3, 8); if (cond) val8 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 5, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 5, 16); if (cond) val7 += spc; spc = help * __shfl(val8, 5, 16); if (cond) val8 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 6, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 6, 16); if (cond) val7 += spc; spc = help * __shfl(val8, 6, 16); if (cond) val8 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 7, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 7, 16); if (cond) val7 += spc; spc = help * __shfl(val8, 7, 16); if (cond) val8 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 13, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 13, 32); if (cond) val7 += spc; spc = help * __shfl(val8, 13, 32); if (cond) val8 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 
14, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 14, 32); if (cond) val7 += spc; spc = help * __shfl(val8, 14, 32); if (cond) val8 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 15, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 15, 32); if (cond) val7 += spc; spc = help * __shfl(val8, 15, 32); if (cond) val8 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * 
delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += 
helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; spartc[clane + (15 * order + 6 * delta)] = val6; spartc[clane + (15 * order + 7 * delta)] = val7; spartc[clane + (15 * order + 8 * delta)] = val8; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; spartc[clane + (31 * 
order + 6 * delta)] = val6; spartc[clane + (31 * order + 8 * delta)] = val8; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val7 += sfacB[tid] * spartc[31 * order + (6 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; val7 += sfacC[tid] * spartc[31 * order + (6 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val6 += sfacB[tid] * spartc[31 * order + (5 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; val6 += sfacC[tid] * spartc[31 * order + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 7 * delta)] = val7; } __syncthreads(); if (warp < 10) { val8 += sfacA[tid] * spartc[31 * order + (7 * delta + 0)]; val8 += sfacB[tid] * spartc[31 * order + (7 * delta + 1)]; val8 += sfacC[tid] * spartc[31 * order + (7 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val8; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6; if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7; if (offs + (8 * block_size) < items) output[offs + (8 * block_size)] = val8; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * 
block_size)] = val1;
    output[offs + (2 * block_size)] = val2;
    output[offs + (3 * block_size)] = val3;
    output[offs + (4 * block_size)] = val4;
    output[offs + (5 * block_size)] = val5;
    output[offs + (6 * block_size)] = val6;
    output[offs + (7 * block_size)] = val7;
    output[offs + (8 * block_size)] = val8;
  }
}

struct GPUTimer
{
  hipEvent_t beg, end;
  GPUTimer() {hipEventCreate(&beg); hipEventCreate(&end);}
  ~GPUTimer() {hipEventDestroy(beg); hipEventDestroy(end);}
  void start() {hipEventRecord(beg, 0);}
  double stop() {hipEventRecord(end, 0); hipEventSynchronize(end); float ms; hipEventElapsedTime(&ms, beg, end); return 0.001 * ms;}
};

int main(int argc, char *argv[])
{
  printf("Parallel Linear Recurrence Computation\n");
  printf("Copyright (c) 2018 Texas State University\n");

  if (argc != 2) { fprintf(stderr, "USAGE: %s problem_size\n", argv[0]); return -1; }
  const int n = atoi(argv[1]);
  if (n < 1) {fprintf(stderr, "ERROR: problem_size must be at least 1\n"); return -1;};

  int *d_status;
  T *h_in, *h_out, *h_sol, *d_in, *d_out, *d_partcarry, *d_fullcarry;
  const size_t size = n * sizeof(T);
  h_in = (T *)malloc(size);  assert(h_in != NULL);
  h_out = (T *)malloc(size);  assert(h_out != NULL);
  h_sol = (T *)malloc(size);  assert(h_sol != NULL);

  for (int i = 0; i < n; i++) {
    h_in[i] = (i & 32) / 16 - 1;
    h_sol[i] = 0;
  }
  for (int i = 0; i < n; i++) {
    if ((i - 0) >= 0) {
      h_sol[i] += 4.035270e-02f * h_in[i - 0];
    }
  }
  for (int i = 1; i < n; i++) {
    if ((i - 1) >= 0) {
      h_sol[i] += 2.132330e+00f * h_sol[i - 1];
    }
    if ((i - 2) >= 0) {
      h_sol[i] += -1.573120e+00f * h_sol[i - 2];
    }
    if ((i - 3) >= 0) {
      h_sol[i] += 4.004360e-01f * h_sol[i - 3];
    }
  }

  hipSetDevice(device);
  hipDeviceProp_t deviceProp;
  hipGetDeviceProperties(&deviceProp, device);
  const int SMs = deviceProp.multiProcessorCount;
  int valsperthread = 1;
  while ((valsperthread < 9) && (block_size * 2 * SMs * valsperthread < n)) {
    valsperthread++;
  }
  const int chunk_size = valsperthread * block_size;
  const int iterations = 5;

  assert(hipSuccess == hipMalloc(&d_in, size));
  assert(hipSuccess == hipMalloc(&d_out, size));
  assert(hipSuccess == hipMalloc(&d_status, (n + chunk_size - 1) / chunk_size * sizeof(int)));
  assert(hipSuccess == hipMalloc(&d_partcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T)));
  assert(hipSuccess == hipMalloc(&d_fullcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T)));
  assert(hipSuccess == hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice));
  assert(hipSuccess == hipMemcpy(d_out, d_in, size, hipMemcpyDeviceToDevice));

  hipMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int));
  switch (valsperthread) {
    case 1: hipLaunchKernelGGL(( Recurrence1), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 2: hipLaunchKernelGGL(( Recurrence2), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 3: hipLaunchKernelGGL(( Recurrence3), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 4: hipLaunchKernelGGL(( Recurrence4), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 5: hipLaunchKernelGGL(( Recurrence5), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 6: hipLaunchKernelGGL(( Recurrence6), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 7: hipLaunchKernelGGL(( Recurrence7), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 8: hipLaunchKernelGGL(( Recurrence8), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    case 9: hipLaunchKernelGGL(( Recurrence9), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
  }

  GPUTimer timer;
  timer.start();
  for (long i = 0; i < iterations; i++) {
    hipMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int));
    switch (valsperthread) {
      case 1: hipLaunchKernelGGL(( Recurrence1), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 2: hipLaunchKernelGGL(( Recurrence2), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 3: hipLaunchKernelGGL(( Recurrence3), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 4: hipLaunchKernelGGL(( Recurrence4), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 5: hipLaunchKernelGGL(( Recurrence5), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 6: hipLaunchKernelGGL(( Recurrence6), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 7: hipLaunchKernelGGL(( Recurrence7), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 8: hipLaunchKernelGGL(( Recurrence8), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
      case 9: hipLaunchKernelGGL(( Recurrence9), dim3((n + chunk_size - 1) / chunk_size), dim3(block_size), 0, 0, n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break;
    }
  }
  double runtime = timer.stop() / iterations;
  double throughput = 0.000000001 * n / runtime;
  assert(hipSuccess == hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost));

  for (int i = 0; i < n; i++) {
    T s = h_sol[i];
    T o = h_out[i];
    if (fabsf(o - s) > 0.001) {
      printf("result not correct at index %d: %e != %e\n", i, h_sol[i], h_out[i]);
      return -1;
    }
  }
  printf("size = %d\tthroughput = %7.4f gigaitems/s\truntime = %7.4f s\tPassed!\n", n, throughput, runtime);

  printf("first elements of result are:\n");
  for (int i = 0; (i < 8) && (i < n); i++) {
    printf(" %f", h_out[i]);
  }
  printf("\n");

  free(h_in);  free(h_out);  free(h_sol);
  hipFree(d_in);  hipFree(d_out);  hipFree(d_status);  hipFree(d_partcarry);  hipFree(d_fullcarry);
  return 0;
}
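Both the HIP version above and the CUDA original below evaluate the same order-3 linear recurrence, y[i] = 0.040353*x[i] + 2.132330*y[i-1] - 1.573120*y[i-2] + 0.400436*y[i-3] (the coefficients listed in the file header and used in the host verification loop). For reference, a minimal sequential sketch of that computation follows; the helper name reference_recurrence and the toy input are illustrative only and belong to neither file.

// Standalone sequential sketch of the recurrence evaluated by the Recurrence* kernels.
// reference_recurrence is a hypothetical helper name; the coefficients are the ones
// from the file header (non-recursive 0.040353, recursive 2.132330, -1.573120, 0.400436).
#include <cstdio>
#include <vector>

static void reference_recurrence(const std::vector<float>& x, std::vector<float>& y)
{
  const float a0 = 4.035270e-02f;                                           // non-recursive coefficient
  const float b1 = 2.132330e+00f, b2 = -1.573120e+00f, b3 = 4.004360e-01f;  // recursive coefficients
  y.assign(x.size(), 0.0f);
  for (size_t i = 0; i < x.size(); i++) {
    y[i] = a0 * x[i];                    // feed-forward term
    if (i >= 1) y[i] += b1 * y[i - 1];   // recursive terms use already-computed outputs
    if (i >= 2) y[i] += b2 * y[i - 2];
    if (i >= 3) y[i] += b3 * y[i - 3];
  }
}

int main()
{
  std::vector<float> x(64), y;
  for (int i = 0; i < (int)x.size(); i++) x[i] = (i & 32) / 16 - 1;  // same input pattern as h_in in the driver above
  reference_recurrence(x, y);
  for (int i = 0; i < 8; i++) printf(" %f", y[i]);                   // mirrors the "first elements" printout
  printf("\n");
  return 0;
}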
ce3a90a5a489f96b132710eb864946213e496638.cu
/* PLR - Parallelized Linear Recurrences [float] Copyright (c) 2018 Texas State University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted for academic, research, experimental, or personal use provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Texas State University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. For all other uses, please contact the Office for Commercialization and Industry Relations at Texas State University http://www.txstate.edu/ocir/. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Authors: Sepideh Maleki and Martin Burtscher non-recursive coefficients: (0.040353) recursive coefficients: (2.132330, -1.573120, 0.400436) */ #include <cstdio> #include <cassert> #include <cuda.h> typedef float T; static const int device = 0; static const int order = 3; static const int warp_size = 32; static const int block_size = 1024; static __device__ const T facA[300] = {4.004360e-01f, 8.538617e-01f, 1.190781e+00f, 1.356260e+00f, 1.360669e+00f, 1.244668e+00f, 1.056642e+00f, 8.399586e-01f, 6.272539e-01f, 4.392741e-01f, 2.862814e-01f, 1.705905e-01f, 8.930148e-02f, 3.669824e-02f, 6.081402e-03f, -9.003654e-03f, -1.407024e-02f, -1.340335e-02f, -1.005157e-02f, -5.982426e-03f, -2.311360e-03f, 4.574805e-04f, 2.215967e-03f, 3.079950e-03f, 3.264679e-03f, 3.003595e-03f, 2.502247e-03f, 1.917895e-03f, 1.355999e-03f, 8.763468e-04f, 5.035064e-04f, 2.380338e-04f, 6.641139e-05f, -3.122257e-05f, -7.573263e-05f, -8.577658e-05f, -7.627012e-05f, -5.802227e-05f, -3.808860e-05f, -2.048277e-05f, -6.992295e-06f, 2.059927e-06f, 7.190126e-06f, 9.291241e-06f, 9.325930e-06f, 8.148910e-06f, 6.425903e-06f, 4.617371e-06f, 3.000159e-06f, 1.706813e-06f, 7.688400e-07f, 1.557706e-07f, -1.938537e-07f, -3.505349e-07f, -3.801246e-07f, -3.367437e-07f, -2.604339e-07f, -1.778083e-07f, -1.042964e-07f, -4.696778e-08f, -7.280846e-09f, 1.659675e-08f, 2.803580e-08f, 3.075738e-08f, 2.812715e-08f, 2.281785e-08f, 1.672417e-08f, 1.102934e-08f, 6.346173e-09f, 2.878612e-09f, 5.714038e-10f, -7.687435e-10f, -1.385402e-09f, -1.515997e-09f, -1.361036e-09f, -1.072097e-09f, -7.520506e-10f, -4.620915e-10f, -2.315717e-10f, -6.801004e-11f, 3.423213e-11f, 8.725253e-11f, 1.049663e-10f, 1.002718e-10f, 8.362706e-11f, 6.261321e-11f, 4.210906e-11f, 2.477960e-11f, 1.166827e-11f, 2.761305e-12f, -2.544936e-12f, -5.098109e-12f, -5.761636e-12f, 
-5.284855e-12f, -4.246777e-12f, -3.048985e-12f, -1.936999e-12f, -1.034463e-12f, -3.796089e-13f, 4.223916e-14f, 2.730019e-13f, 3.636737e-13f, 3.629217e-13f, 3.110862e-13f, 2.380470e-13f, 1.635459e-13f, 9.882725e-14f, 4.877768e-14f, 1.403263e-14f, -7.236951e-15f, -1.797424e-14f, -2.132325e-14f, -2.009050e-14f, -1.649308e-14f, -1.210252e-14f, -7.905927e-15f, -4.423757e-15f, -1.842221e-15f, -1.349404e-16f, 8.388659e-16f, 1.263324e-15f, 1.320153e-15f, 1.163553e-15f, 9.101997e-16f, 6.390748e-16f, 3.967934e-16f, 2.052298e-16f, 6.932263e-17f, -1.614205e-17f, -6.129159e-17f, -7.754122e-17f, -7.538830e-17f, -6.331444e-17f, -4.746274e-17f, -3.179320e-17f, -1.848239e-17f, -8.401827e-18f, -1.571592e-18f, 2.464915e-18f, 4.363920e-18f, 4.798388e-18f, 4.353818e-18f, 3.482806e-18f, 2.498861e-18f, 1.592950e-18f, 8.603277e-19f, 3.292348e-19f, -1.348682e-20f, -2.021780e-19f, -2.780564e-19f, -2.802583e-19f, -2.411463e-19f, -1.846674e-19f, -1.266453e-19f, -7.610917e-20f, -3.700914e-20f, -9.900163e-21f, 6.632551e-21f, 1.489714e-20f, 1.736743e-20f, 1.625402e-20f, 1.330323e-20f, 9.751900e-21f, 6.375383e-21f, 3.580604e-21f, 1.510800e-21f, 1.417361e-22f, -6.406382e-22f, -9.840412e-22f, -1.033744e-21f, -9.128021e-22f, -7.142379e-22f, -5.009920e-22f, -3.102170e-22f, -1.593711e-22f, -5.243851e-23f, 1.467165e-23f, 4.995894e-23f, 6.245041e-23f, 6.044852e-23f, 5.065956e-23f, 3.793751e-23f, 2.540749e-23f, 1.478281e-23f, 6.744335e-24f, 1.300093e-24f, -1.917852e-24f, -3.434022e-24f, -3.784852e-24f, -3.436402e-24f, -2.748623e-24f, -1.970689e-24f, -1.254305e-24f, -6.751096e-25f, -2.555189e-25f, 1.490903e-26f, 1.634145e-25f, 2.226810e-25f, 2.237289e-25f, 1.921969e-25f, 1.470444e-25f, 1.007874e-25f, 6.055608e-26f, 2.945677e-26f, 7.908466e-27f, -5.226737e-27f, -1.179054e-26f, -1.375221e-26f, -1.286928e-26f, -1.052904e-26f, -7.713337e-27f, -5.037268e-27f, -2.823317e-27f, -1.184715e-27f, -1.018900e-28f, 5.158778e-28f, 7.859042e-28f, 8.234690e-28f, 7.261621e-28f, 5.677060e-28f, 3.979431e-28f, 2.462577e-28f, 1.264204e-28f, 4.152785e-29f, -1.171296e-29f, -3.968091e-29f, -4.955764e-29f, -4.794071e-29f, -4.015496e-29f, -3.005179e-29f, -2.010896e-29f, -1.168335e-29f, -5.312771e-30f, -1.001623e-30f, 1.543403e-30f, 2.739292e-30f, 3.012031e-30f, 2.731442e-30f, 2.182961e-30f, 1.564033e-30f, 9.947427e-31f, 5.348439e-31f, 2.019093e-31f, -1.250566e-32f, -1.301229e-31f, -1.769404e-31f, -1.776040e-31f, -1.524678e-31f, -1.165725e-31f, -7.983995e-32f, -4.791618e-32f, -2.325511e-32f, -6.180455e-33f, 4.216945e-33f, 9.402333e-33f, 1.094024e-32f, 1.022582e-32f, 8.359541e-33f, 6.119728e-33f, 3.993504e-33f, 2.235863e-33f, 9.358959e-34f, 7.750108e-35f, -4.116986e-34f, -6.250294e-34f, -6.540833e-34f, -5.763340e-34f, -4.502671e-34f, -3.153939e-34f, -1.949846e-34f, -9.992214e-35f, -3.262791e-35f, 9.537197e-36f, 3.165165e-35f, 3.942321e-35f, 3.809049e-35f, 3.187851e-35f, 2.384105e-35f, 1.594109e-35f, 9.252121e-36f, 4.198147e-36f, 7.805235e-37f, -1.234973e-36f, -2.180138e-36f, -2.393464e-36f, -2.168563e-36f, -1.731891e-36f, -1.239984e-36f, -7.879521e-37f, -4.230421e-37f, -1.590565e-37f, 0.000000e+00f, 1.038658e-37f, 1.407775e-37f, 1.411198e-37f, 1.210456e-37f, 9.248325e-38f, 6.329497e-38f, 3.794953e-38f, 1.838396e-38f}; static __device__ const T facB[307] = {-1.573120e+00f, -2.953975e+00f, -3.824143e+00f, -4.137311e+00f, -3.989155e+00f, -3.529032e+00f, -2.906369e+00f, -2.243149e+00f, -1.624217e+00f, -1.098439e+00f, -6.853842e-01f, -3.838832e-01f, -1.802289e-01f, -5.486554e-02f, 1.280957e-02f, 4.145417e-02f, 4.627284e-02f, 3.858599e-02f, 2.608508e-02f, 
1.345091e-02f, 3.098036e-03f, -4.108451e-03f, -8.247929e-03f, -9.883654e-03f, -9.745402e-03f, -8.535006e-03f, -6.826533e-03f, -5.032243e-03f, -3.409170e-03f, -2.086742e-03f, -1.101681e-03f, -4.316070e-04f, -2.285797e-05f, 1.890759e-04f, 2.662995e-04f, 2.612462e-04f, 2.138548e-04f, 1.516733e-04f, 9.161064e-05f, 4.237898e-05f, 6.986898e-06f, -1.508465e-05f, -2.618662e-05f, -2.931073e-05f, -2.734590e-05f, -2.268726e-05f, -1.709540e-05f, -1.171354e-05f, -7.168809e-06f, -3.705080e-06f, -1.313579e-06f, 1.569026e-07f, 9.173381e-07f, 1.183236e-06f, 1.142797e-06f, 9.427829e-07f, 6.863776e-07f, 4.380899e-07f, 2.319221e-07f, 8.021678e-08f, -1.836572e-08f, -7.248244e-08f, -9.354329e-08f, -9.279589e-08f, -7.974120e-08f, -6.151359e-08f, -4.288362e-08f, -2.660501e-08f, -1.390185e-08f, -4.962594e-09f, 6.337801e-10f, 3.591385e-09f, 4.673803e-09f, 4.570199e-09f, 3.830839e-09f, 2.850701e-09f, 1.882337e-09f, 1.063276e-09f, 4.476357e-10f, 3.560213e-11f, -2.024952e-10f, -3.085435e-10f, -3.251110e-10f, -2.889544e-10f, -2.282593e-10f, -1.623504e-10f, -1.028131e-10f, -5.523802e-11f, -2.105931e-11f, 8.205728e-13f, 1.275926e-11f, 1.748318e-11f, 1.753665e-11f, 1.500006e-11f, 1.139870e-11f, 7.731212e-12f, 4.560532e-12f, 2.126885e-12f, 4.568121e-13f, -5.455694e-13f, -1.030273e-12f, -1.155712e-12f, -1.062082e-12f, -8.591936e-13f, -6.240907e-13f, -4.044484e-13f, -2.247000e-13f, -9.279703e-14f, -6.349494e-15f, 4.246365e-14f, 6.337576e-14f, 6.579505e-14f, 5.760304e-14f, 4.470313e-14f, 3.105203e-14f, 1.895612e-14f, 9.472874e-15f, 2.813390e-15f, -1.312176e-15f, -3.430513e-15f, -4.124192e-15f, -3.922972e-15f, -3.250923e-15f, -2.412210e-15f, -1.600436e-15f, -9.197477e-16f, -4.094635e-16f, -6.711000e-17f, 1.327344e-16f, 2.246417e-16f, 2.433299e-16f, 2.186228e-16f, 1.733436e-16f, 1.231438e-16f, 7.743747e-17f, 4.081522e-17f, 1.452430e-17f, -2.227892e-18f, -1.125518e-17f, -1.467896e-17f, -1.448677e-17f, -1.230578e-17f, -9.328549e-18f, -6.334096e-18f, -3.759134e-18f, -1.786908e-18f, -4.331085e-19f, 3.821979e-19f, 7.807613e-19f, 8.901652e-19f, 8.229406e-19f, 6.670891e-19f, 4.843241e-19f, 3.128625e-19f, 1.723527e-19f, 6.928337e-20f, 1.884938e-21f, -3.595551e-20f, -5.189069e-20f, -5.333094e-20f, -4.648677e-20f, -3.600806e-20f, -2.500742e-20f, -1.529405e-20f, -7.691217e-21f, -2.354704e-21f, 9.539129e-22f, 2.658448e-21f, 3.225160e-21f, 3.077030e-21f, 2.552217e-21f, 1.893101e-21f, 1.253928e-21f, 7.177111e-22f, 3.158841e-22f, 4.664129e-23f, -1.100716e-22f, -1.815900e-22f, -1.953771e-22f, -1.750222e-22f, -1.385686e-22f, -9.837913e-23f, -6.187691e-23f, -3.266768e-23f, -1.171301e-23f, 1.636430e-24f, 8.834065e-24f, 1.157253e-23f, 1.143469e-23f, 9.715038e-24f, 7.361581e-24f, 4.993261e-24f, 2.956881e-24f, 1.397890e-24f, 3.287158e-25f, -3.140768e-25f, -6.270591e-25f, -7.113869e-25f, -6.562400e-25f, -5.313204e-25f, -3.854710e-25f, -2.489028e-25f, -1.371105e-25f, -5.516745e-26f, -1.613523e-27f, 2.844046e-26f, 4.109167e-26f, 4.223464e-26f, 3.680464e-26f, 2.849406e-26f, 1.977289e-26f, 1.207565e-26f, 6.054199e-27f, 1.830872e-27f, -7.844301e-28f, -2.128526e-27f, -2.571570e-27f, -2.449123e-27f, -2.029288e-27f, -1.504097e-27f, -9.956340e-28f, -5.694955e-28f, -2.503949e-28f, -3.672758e-29f, 8.753944e-29f, 1.441727e-28f, 1.550067e-28f, 1.387783e-28f, 1.098091e-28f, 7.790441e-29f, 4.894691e-29f, 2.578950e-29f, 9.188076e-30f, -1.377857e-30f, -7.064948e-30f, -9.218029e-30f, -9.093611e-30f, -7.718574e-30f, -5.844435e-30f, -3.961432e-30f, -2.343876e-30f, -1.106432e-30f, -2.583804e-31f, 2.510263e-31f, 4.986788e-31f, 5.649886e-31f, 5.207803e-31f, 4.213696e-31f, 
3.054908e-31f, 1.970814e-31f, 1.084005e-31f, 4.344237e-32f, 1.024996e-33f, -2.274697e-32f, -3.272060e-32f, -3.357696e-32f, -2.923243e-32f, -2.261512e-32f, -1.568220e-32f, -9.569041e-33f, -4.790280e-33f, -1.440925e-33f, 6.313694e-34f, 1.694835e-33f, 2.043729e-33f, 1.944549e-33f, 1.610063e-33f, 1.192558e-33f, 7.887731e-34f, 4.506149e-34f, 1.975681e-34f, 2.826219e-35f, -7.009156e-35f, -1.148048e-34f, -1.232220e-34f, -1.102155e-34f, -8.714478e-35f, -6.178172e-35f, -3.878409e-35f, -2.040632e-35f, -7.240612e-36f, 1.131673e-36f, 5.632026e-36f, 7.329679e-36f, 7.222605e-36f, 6.125777e-36f, 4.635221e-36f, 3.139429e-36f, 1.855523e-36f, 8.739973e-37f, 2.018311e-37f, -2.015140e-37f, -3.972188e-37f, -4.491755e-37f, -4.136110e-37f, -3.344088e-37f, -2.422763e-37f, -1.561725e-37f, -8.579103e-38f, -3.427281e-38f, 0.000000e+00f, 1.815737e-38f, 2.602928e-38f, 2.667561e-38f, 2.320490e-38f, 1.793961e-38f, 1.243096e-38f}; static __device__ const T facC[306] = {2.132330e+00f, 2.973711e+00f, 3.386958e+00f, 3.397969e+00f, 3.108282e+00f, 2.638729e+00f, 2.097609e+00f, 1.566426e+00f, 1.096988e+00f, 7.149222e-01f, 4.260101e-01f, 2.230090e-01f, 9.164453e-02f, 1.518613e-02f, -2.248514e-02f, -3.513759e-02f, -3.347203e-02f, -2.510162e-02f, -1.493979e-02f, -5.772089e-03f, 1.142484e-03f, 5.533914e-03f, 7.691513e-03f, 8.152826e-03f, 7.500819e-03f, 6.248807e-03f, 4.789514e-03f, 3.386298e-03f, 2.188473e-03f, 1.257386e-03f, 5.944275e-04f, 1.658399e-04f, -7.797778e-05f, -1.891303e-04f, -2.142115e-04f, -1.904701e-04f, -1.448993e-04f, -9.511872e-05f, -5.115166e-05f, -1.746192e-05f, 5.144146e-06f, 1.795575e-05f, 2.320285e-05f, 2.328948e-05f, 2.035011e-05f, 1.604727e-05f, 1.153085e-05f, 7.492218e-06f, 4.262369e-06f, 1.919989e-06f, 3.889861e-07f, -4.841206e-07f, -8.753940e-07f, -9.492850e-07f, -8.409482e-07f, -6.503792e-07f, -4.440385e-07f, -2.604581e-07f, -1.172918e-07f, -1.818209e-08f, 4.144710e-08f, 7.001364e-08f, 7.681015e-08f, 7.024165e-08f, 5.698277e-08f, 4.176507e-08f, 2.754347e-08f, 1.584824e-08f, 7.188746e-09f, 1.426986e-09f, -1.919747e-09f, -3.459722e-09f, -3.785860e-09f, -3.398880e-09f, -2.677319e-09f, -1.878075e-09f, -1.153967e-09f, -5.782951e-10f, -1.698363e-10f, 8.549059e-11f, 2.178968e-10f, 2.621324e-10f, 2.504084e-10f, 2.088414e-10f, 1.563635e-10f, 1.051586e-10f, 6.188185e-11f, 2.913903e-11f, 6.895734e-12f, -6.355489e-12f, -1.273150e-11f, -1.438851e-11f, -1.319784e-11f, -1.060546e-11f, -7.614215e-12f, -4.837256e-12f, -2.583358e-12f, -9.479934e-13f, 1.054855e-13f, 6.817679e-13f, 9.082020e-13f, 9.063237e-13f, 7.768749e-13f, 5.944743e-13f, 4.084228e-13f, 2.468013e-13f, 1.218128e-13f, 3.504411e-14f, -1.807236e-14f, -4.488659e-14f, -5.325012e-14f, -5.017165e-14f, -4.118789e-14f, -3.022341e-14f, -1.974333e-14f, -1.104735e-14f, -4.600510e-15f, -3.369395e-16f, 2.094932e-15f, 3.154922e-15f, 3.296833e-15f, 2.905751e-15f, 2.273050e-15f, 1.595968e-15f, 9.909176e-16f, 5.125250e-16f, 1.731233e-16f, -4.030834e-17f, -1.530609e-16f, -1.936417e-16f, -1.882657e-16f, -1.581142e-16f, -1.185281e-16f, -7.939688e-17f, -4.615600e-17f, -2.098193e-17f, -3.924826e-18f, 6.155537e-18f, 1.089794e-17f, 1.198296e-17f, 1.087276e-17f, 8.697606e-18f, 6.240416e-18f, 3.978091e-18f, 2.148516e-18f, 8.222165e-19f, -3.366491e-20f, -5.048869e-19f, -6.943814e-19f, -6.998834e-19f, -6.022118e-19f, -4.611691e-19f, -3.162718e-19f, -1.900688e-19f, -9.242456e-20f, -2.472529e-20f, 1.656206e-20f, 3.720150e-20f, 4.337087e-20f, 4.059063e-20f, 3.322184e-20f, 2.435326e-20f, 1.592120e-20f, 8.941867e-21f, 3.772978e-21f, 3.540264e-22f, -1.599801e-21f, -2.457393e-21f, 
-2.581529e-21f, -2.279515e-21f, -1.783653e-21f, -1.251122e-21f, -7.747060e-22f, -3.980020e-22f, -1.309605e-22f, 3.663374e-23f, 1.247575e-22f, 1.559535e-22f, 1.509554e-22f, 1.265105e-22f, 9.474053e-23f, 6.344987e-23f, 3.691719e-23f, 1.684289e-23f, 3.247049e-24f, -4.789137e-24f, -8.575518e-24f, -9.451713e-24f, -8.581595e-24f, -6.864059e-24f, -4.921364e-24f, -3.132364e-24f, -1.685954e-24f, -6.381163e-25f, 3.722132e-26f, 4.080850e-25f, 5.560936e-25f, 5.587130e-25f, 4.799704e-25f, 3.672125e-25f, 2.516961e-25f, 1.512271e-25f, 7.356310e-26f, 1.975059e-26f, -1.305203e-26f, -2.944397e-26f, -3.434300e-26f, -3.213821e-26f, -2.629404e-26f, -1.926248e-26f, -1.257958e-26f, -7.050697e-27f, -2.958621e-27f, -2.544781e-28f, 1.288281e-27f, 1.962626e-27f, 2.056444e-27f, 1.813445e-27f, 1.417736e-27f, 9.937881e-28f, 6.149845e-28f, 3.157142e-28f, 1.037110e-28f, -2.924826e-29f, -9.909349e-29f, -1.237594e-28f, -1.197219e-28f, -1.002788e-28f, -7.504839e-29f, -5.021822e-29f, -2.917697e-29f, -1.326770e-29f, -2.501428e-30f, 3.854305e-30f, 6.840831e-30f, 7.521963e-30f, 6.821262e-30f, 5.451546e-30f, 3.905896e-30f, 2.484202e-30f, 1.335691e-30f, 5.042475e-31f, -3.121662e-32f, -3.249470e-31f, -4.418680e-31f, -4.435279e-31f, -3.807570e-31f, -2.911168e-31f, -1.993852e-31f, -1.196621e-31f, -5.807598e-32f, -1.543526e-32f, 1.053038e-32f, 2.348006e-32f, 2.732085e-32f, 2.553685e-32f, 2.087628e-32f, 1.528284e-32f, 9.973037e-33f, 5.583677e-33f, 2.337258e-33f, 1.935753e-34f, -1.028116e-33f, -1.560877e-33f, -1.633441e-33f, -1.439283e-33f, -1.124459e-33f, -7.876404e-34f, -4.869416e-34f, -2.495410e-34f, -8.148575e-35f, 2.381446e-35f, 7.904196e-35f, 9.845071e-35f, 9.512305e-35f, 7.961020e-35f, 5.953846e-35f, 3.980992e-35f, 2.310555e-35f, 1.048421e-35f, 1.949313e-36f, -3.084043e-36f, -5.444446e-36f, -5.977210e-36f, -5.415579e-36f, -4.325085e-36f, -3.096643e-36f, -1.967779e-36f, -1.056483e-36f, -3.972256e-37f, 2.698968e-38f, 2.593803e-37f, 3.515631e-37f, 3.524196e-37f, 3.022893e-37f, 2.309607e-37f, 1.580685e-37f, 9.477289e-38f, 4.591126e-38f, 1.210517e-38f, 0.000000e+00f, -1.870049e-38f, -2.171775e-38f, -2.027947e-38f, -1.656624e-38f, -1.211922e-38f}; // shared memory size is 15760 bytes static __device__ unsigned int counter = 0; static __global__ __launch_bounds__(block_size, 2) void Recurrence1(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 1; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; } val0 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); 
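// The __shfl rounds below appear to implement the warp-level carry propagation of the order-3 recurrence:
// at sub-warp widths 2, 4, 8, 16, and 32, lanes in the upper half add values read from the last (up to)
// three lanes of the lower half, scaled by per-lane factors from sfacA/sfacB/sfacC (the width-2 step uses
// the first recursive coefficient directly).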
spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 
16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; } } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val0; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; } else { output[offs + (0 * block_size)] = val0; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence2(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 2; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 
3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + 
(0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val1; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence3(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 3; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; } 
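// Interior chunks are always full, so the else branch below can load without bounds checks;
// only the final chunk (handled above) may be partially filled.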
else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * 
delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += 
helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val2; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence4(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 4; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; } val0 *= 
4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 
32); if (cond) val3 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * 
spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val3; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) 
output[offs + (3 * block_size)] = val3; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence5(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 5; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 
+= spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += 
helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += 
helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val4; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += 
sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence6(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 6; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * 
__shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 
16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size 
* 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * 
spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val5; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence7(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 7; const int chunk_size = valsperthread * block_size; __shared__ T 
spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5, val6; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; val6 = 0; if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; val6 = input[offs + (6 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; val6 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 2); if (cond) val6 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 4); if (cond) val6 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 4); if (cond) val6 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) 
val5 += spc; spc = help * __shfl(val6, 1, 8); if (cond) val6 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 2, 8); if (cond) val6 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 3, 8); if (cond) val6 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 5, 16); if (cond) val6 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 6, 16); if (cond) val6 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 7, 16); if (cond) val6 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 13, 32); if (cond) val6 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 14, 32); if (cond) val6 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = 
help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 15, 32); if (cond) val6 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] 
= val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; spartc[clane + (15 * order + 6 * delta)] = val6; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * 
spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; spartc[clane + (31 * order + 6 * delta)] = val6; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val6 += sfacB[tid] * spartc[31 * order + (5 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; val6 += sfacC[tid] * spartc[31 * order + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val6; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * 
block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; output[offs + (6 * block_size)] = val6; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence8(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 8; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5, val6, val7; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; val6 = 0; if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)]; val7 = 0; if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; val6 = input[offs + (6 * block_size)]; val7 = input[offs + (7 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; val6 *= 4.035270e-02f; val7 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 2); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 2); if (cond) val7 += 
spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 4); if (cond) val7 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 4); if (cond) val7 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 8); if (cond) val7 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 2, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 2, 8); if (cond) val7 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 3, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 3, 8); if (cond) val7 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 5, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 5, 16); if (cond) val7 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 6, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 6, 16); if (cond) val7 += spc; help 
= __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 7, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 7, 16); if (cond) val7 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 13, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 13, 32); if (cond) val7 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 14, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 14, 32); if (cond) val7 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 15, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 15, 32); if (cond) val7 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + 
(0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * 
spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; spartc[clane + (15 * order + 6 * delta)] = val6; spartc[clane + (15 * order + 7 * delta)] = val7; } } __syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * 
spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; spartc[clane + (31 * order + 6 * delta)] = val6; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val7 += sfacB[tid] * spartc[31 * order + (6 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; val7 += sfacC[tid] * spartc[31 * order + (6 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val6 += sfacB[tid] * spartc[31 * order + (5 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; val6 += sfacC[tid] * spartc[31 * order + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val7; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6; if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * 
block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; output[offs + (6 * block_size)] = val6; output[offs + (7 * block_size)] = val7; } } static __global__ __launch_bounds__(block_size, 2) void Recurrence9(const int items, const T* const __restrict__ input, T* const __restrict__ output, volatile int* const __restrict__ status, volatile T* const __restrict__ partcarry, volatile T* const __restrict__ fullcarry) { const int valsperthread = 9; const int chunk_size = valsperthread * block_size; __shared__ T spartc[chunk_size / warp_size * order]; __shared__ T sfullc[order]; __shared__ int cid; const int tid = threadIdx.x; const int warp = tid / warp_size; const int lane = tid % warp_size; __shared__ T sfacA[block_size]; __shared__ T sfacB[block_size]; __shared__ T sfacC[block_size]; if (tid < 300) sfacA[tid] = facA[tid]; else sfacA[tid] = 0; if (tid < 307) sfacB[tid] = facB[tid]; else sfacB[tid] = 0; if (tid < 306) sfacC[tid] = facC[tid]; else sfacC[tid] = 0; if (tid == 0) { cid = atomicInc(&counter, gridDim.x - 1); } __syncthreads(); const int chunk_id = cid; const int offs = tid + chunk_id * chunk_size; T val0, val1, val2, val3, val4, val5, val6, val7, val8; if (chunk_id == gridDim.x - 1) { val0 = 0; if (offs + (0 * block_size) < items) val0 = input[offs + (0 * block_size)]; val1 = 0; if (offs + (1 * block_size) < items) val1 = input[offs + (1 * block_size)]; val2 = 0; if (offs + (2 * block_size) < items) val2 = input[offs + (2 * block_size)]; val3 = 0; if (offs + (3 * block_size) < items) val3 = input[offs + (3 * block_size)]; val4 = 0; if (offs + (4 * block_size) < items) val4 = input[offs + (4 * block_size)]; val5 = 0; if (offs + (5 * block_size) < items) val5 = input[offs + (5 * block_size)]; val6 = 0; if (offs + (6 * block_size) < items) val6 = input[offs + (6 * block_size)]; val7 = 0; if (offs + (7 * block_size) < items) val7 = input[offs + (7 * block_size)]; val8 = 0; if (offs + (8 * block_size) < items) val8 = input[offs + (8 * block_size)]; } else { val0 = input[offs + (0 * block_size)]; val1 = input[offs + (1 * block_size)]; val2 = input[offs + (2 * block_size)]; val3 = input[offs + (3 * block_size)]; val4 = input[offs + (4 * block_size)]; val5 = input[offs + (5 * block_size)]; val6 = input[offs + (6 * block_size)]; val7 = input[offs + (7 * block_size)]; val8 = input[offs + (8 * block_size)]; } val0 *= 4.035270e-02f; val1 *= 4.035270e-02f; val2 *= 4.035270e-02f; val3 *= 4.035270e-02f; val4 *= 4.035270e-02f; val5 *= 4.035270e-02f; val6 *= 4.035270e-02f; val7 *= 4.035270e-02f; val8 *= 4.035270e-02f; const T sfA = sfacA[lane]; const T sfB = sfacB[lane]; const T sfC = sfacC[lane]; int cond; T help, spc; help = 2.132330e+00f; cond = ((lane & 1) != 0); spc = help * __shfl(val0, 0, 2); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 2); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 2); if (cond) val2 += spc; spc = help * __shfl(val3, 0, 2); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 2); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 2); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 2); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 2); if (cond) val7 += spc; spc = help * __shfl(val8, 0, 2); if (cond) val8 += spc; help = __shfl(sfB, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 0, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 0, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 0, 4); if (cond) val2 += 
spc; spc = help * __shfl(val3, 0, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 0, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 0, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 0, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 0, 4); if (cond) val7 += spc; spc = help * __shfl(val8, 0, 4); if (cond) val8 += spc; help = __shfl(sfC, lane % 2); cond = ((lane & 2) != 0); spc = help * __shfl(val0, 1, 4); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 4); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 4); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 4); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 4); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 4); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 4); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 4); if (cond) val7 += spc; spc = help * __shfl(val8, 1, 4); if (cond) val8 += spc; help = __shfl(sfA, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 1, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 1, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 1, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 1, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 1, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 1, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 1, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 1, 8); if (cond) val7 += spc; spc = help * __shfl(val8, 1, 8); if (cond) val8 += spc; help = __shfl(sfB, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 2, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 2, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 2, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 2, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 2, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 2, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 2, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 2, 8); if (cond) val7 += spc; spc = help * __shfl(val8, 2, 8); if (cond) val8 += spc; help = __shfl(sfC, lane % 4); cond = ((lane & 4) != 0); spc = help * __shfl(val0, 3, 8); if (cond) val0 += spc; spc = help * __shfl(val1, 3, 8); if (cond) val1 += spc; spc = help * __shfl(val2, 3, 8); if (cond) val2 += spc; spc = help * __shfl(val3, 3, 8); if (cond) val3 += spc; spc = help * __shfl(val4, 3, 8); if (cond) val4 += spc; spc = help * __shfl(val5, 3, 8); if (cond) val5 += spc; spc = help * __shfl(val6, 3, 8); if (cond) val6 += spc; spc = help * __shfl(val7, 3, 8); if (cond) val7 += spc; spc = help * __shfl(val8, 3, 8); if (cond) val8 += spc; help = __shfl(sfA, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 5, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 5, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 5, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 5, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 5, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 5, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 5, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 5, 16); if (cond) val7 += spc; spc = help * __shfl(val8, 5, 16); if (cond) val8 += spc; help = __shfl(sfB, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 6, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 6, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 6, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 6, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 6, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 6, 16); if (cond) val5 += spc; spc = 
help * __shfl(val6, 6, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 6, 16); if (cond) val7 += spc; spc = help * __shfl(val8, 6, 16); if (cond) val8 += spc; help = __shfl(sfC, lane % 8); cond = ((lane & 8) != 0); spc = help * __shfl(val0, 7, 16); if (cond) val0 += spc; spc = help * __shfl(val1, 7, 16); if (cond) val1 += spc; spc = help * __shfl(val2, 7, 16); if (cond) val2 += spc; spc = help * __shfl(val3, 7, 16); if (cond) val3 += spc; spc = help * __shfl(val4, 7, 16); if (cond) val4 += spc; spc = help * __shfl(val5, 7, 16); if (cond) val5 += spc; spc = help * __shfl(val6, 7, 16); if (cond) val6 += spc; spc = help * __shfl(val7, 7, 16); if (cond) val7 += spc; spc = help * __shfl(val8, 7, 16); if (cond) val8 += spc; help = __shfl(sfA, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 13, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 13, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 13, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 13, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 13, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 13, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 13, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 13, 32); if (cond) val7 += spc; spc = help * __shfl(val8, 13, 32); if (cond) val8 += spc; help = __shfl(sfB, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 14, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 14, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 14, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 14, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 14, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 14, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 14, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 14, 32); if (cond) val7 += spc; spc = help * __shfl(val8, 14, 32); if (cond) val8 += spc; help = __shfl(sfC, lane % 16); cond = ((lane & 16) != 0); spc = help * __shfl(val0, 15, 32); if (cond) val0 += spc; spc = help * __shfl(val1, 15, 32); if (cond) val1 += spc; spc = help * __shfl(val2, 15, 32); if (cond) val2 += spc; spc = help * __shfl(val3, 15, 32); if (cond) val3 += spc; spc = help * __shfl(val4, 15, 32); if (cond) val4 += spc; spc = help * __shfl(val5, 15, 32); if (cond) val5 += spc; spc = help * __shfl(val6, 15, 32); if (cond) val6 += spc; spc = help * __shfl(val7, 15, 32); if (cond) val7 += spc; spc = help * __shfl(val8, 15, 32); if (cond) val8 += spc; const int delta = block_size / warp_size * order; const int clane = lane - (warp_size - order); const int clwo = clane + warp * order; if (((warp & 1) == 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } __syncthreads(); if ((warp & 1) != 0) { const int cwarp = ((warp & ~1) | 0) * order; const T helpA = sfacA[tid % (warp_size * 1)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % 
(warp_size * 1)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 1)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if (((warp & 3) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } } __syncthreads(); if ((warp & 2) != 0) { const int cwarp = ((warp & ~3) | 1) * order; const T helpA = sfacA[tid % (warp_size * 2)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 2)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 2)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if (((warp & 7) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } } __syncthreads(); if ((warp & 4) != 0) { const int cwarp = ((warp & ~7) | 3) * order; const T helpA = sfacA[tid % (warp_size * 4)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; 
val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 4)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 4)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if (((warp & 15) != 0) && (clane >= 0)) { spartc[clwo + 0 * delta] = val0; spartc[clwo + 1 * delta] = val1; spartc[clwo + 2 * delta] = val2; spartc[clwo + 3 * delta] = val3; spartc[clwo + 4 * delta] = val4; spartc[clwo + 5 * delta] = val5; spartc[clwo + 6 * delta] = val6; spartc[clwo + 7 * delta] = val7; spartc[clwo + 8 * delta] = val8; } } __syncthreads(); if ((warp & 8) != 0) { const int cwarp = ((warp & ~15) | 7) * order; const T helpA = sfacA[tid % (warp_size * 8)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 8)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 8)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; if ((warp == 15) && (clane >= 0)) { spartc[clane + (15 * order + 0 * delta)] = val0; spartc[clane + (15 * order + 1 * delta)] = val1; spartc[clane + (15 * order + 2 * delta)] = val2; spartc[clane + (15 * order + 3 * delta)] = val3; spartc[clane + (15 * order + 4 * delta)] = val4; spartc[clane + (15 * order + 5 * delta)] = val5; spartc[clane + (15 * order + 6 * delta)] = val6; spartc[clane + (15 * order + 7 * delta)] = val7; spartc[clane + (15 * order + 8 * delta)] = val8; } } 
__syncthreads(); if ((warp & 16) != 0) { if ((warp & 15) < 10) { const int cwarp = 15 * order; const T helpA = sfacA[tid % (warp_size * 16)]; val0 += helpA * spartc[cwarp + (0 * delta + 0)]; val1 += helpA * spartc[cwarp + (1 * delta + 0)]; val2 += helpA * spartc[cwarp + (2 * delta + 0)]; val3 += helpA * spartc[cwarp + (3 * delta + 0)]; val4 += helpA * spartc[cwarp + (4 * delta + 0)]; val5 += helpA * spartc[cwarp + (5 * delta + 0)]; val6 += helpA * spartc[cwarp + (6 * delta + 0)]; val7 += helpA * spartc[cwarp + (7 * delta + 0)]; val8 += helpA * spartc[cwarp + (8 * delta + 0)]; const T helpB = sfacB[tid % (warp_size * 16)]; val0 += helpB * spartc[cwarp + (0 * delta + 1)]; val1 += helpB * spartc[cwarp + (1 * delta + 1)]; val2 += helpB * spartc[cwarp + (2 * delta + 1)]; val3 += helpB * spartc[cwarp + (3 * delta + 1)]; val4 += helpB * spartc[cwarp + (4 * delta + 1)]; val5 += helpB * spartc[cwarp + (5 * delta + 1)]; val6 += helpB * spartc[cwarp + (6 * delta + 1)]; val7 += helpB * spartc[cwarp + (7 * delta + 1)]; val8 += helpB * spartc[cwarp + (8 * delta + 1)]; const T helpC = sfacC[tid % (warp_size * 16)]; val0 += helpC * spartc[cwarp + (0 * delta + 2)]; val1 += helpC * spartc[cwarp + (1 * delta + 2)]; val2 += helpC * spartc[cwarp + (2 * delta + 2)]; val3 += helpC * spartc[cwarp + (3 * delta + 2)]; val4 += helpC * spartc[cwarp + (4 * delta + 2)]; val5 += helpC * spartc[cwarp + (5 * delta + 2)]; val6 += helpC * spartc[cwarp + (6 * delta + 2)]; val7 += helpC * spartc[cwarp + (7 * delta + 2)]; val8 += helpC * spartc[cwarp + (8 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 0 * delta)] = val0; spartc[clane + (31 * order + 2 * delta)] = val2; spartc[clane + (31 * order + 4 * delta)] = val4; spartc[clane + (31 * order + 6 * delta)] = val6; spartc[clane + (31 * order + 8 * delta)] = val8; } } __syncthreads(); if (warp < 10) { val1 += sfacA[tid] * spartc[31 * order + (0 * delta + 0)]; val3 += sfacA[tid] * spartc[31 * order + (2 * delta + 0)]; val5 += sfacA[tid] * spartc[31 * order + (4 * delta + 0)]; val7 += sfacA[tid] * spartc[31 * order + (6 * delta + 0)]; val1 += sfacB[tid] * spartc[31 * order + (0 * delta + 1)]; val3 += sfacB[tid] * spartc[31 * order + (2 * delta + 1)]; val5 += sfacB[tid] * spartc[31 * order + (4 * delta + 1)]; val7 += sfacB[tid] * spartc[31 * order + (6 * delta + 1)]; val1 += sfacC[tid] * spartc[31 * order + (0 * delta + 2)]; val3 += sfacC[tid] * spartc[31 * order + (2 * delta + 2)]; val5 += sfacC[tid] * spartc[31 * order + (4 * delta + 2)]; val7 += sfacC[tid] * spartc[31 * order + (6 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 1 * delta)] = val1; spartc[clane + (31 * order + 5 * delta)] = val5; } __syncthreads(); if (warp < 10) { val2 += sfacA[tid] * spartc[31 * order + (1 * delta + 0)]; val6 += sfacA[tid] * spartc[31 * order + (5 * delta + 0)]; val2 += sfacB[tid] * spartc[31 * order + (1 * delta + 1)]; val6 += sfacB[tid] * spartc[31 * order + (5 * delta + 1)]; val2 += sfacC[tid] * spartc[31 * order + (1 * delta + 2)]; val6 += sfacC[tid] * spartc[31 * order + (5 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 3 * delta)] = val3; } __syncthreads(); if (warp < 10) { val4 += sfacA[tid] * spartc[31 * order + (3 * delta + 0)]; val4 += sfacB[tid] * spartc[31 * order + (3 * delta + 1)]; val4 += sfacC[tid] * spartc[31 * order + (3 * delta + 2)]; } if ((warp == 31) && (clane >= 0)) { spartc[clane + (31 * order + 7 * delta)] = val7; } __syncthreads(); if (warp < 10) { val8 += 
sfacA[tid] * spartc[31 * order + (7 * delta + 0)]; val8 += sfacB[tid] * spartc[31 * order + (7 * delta + 1)]; val8 += sfacC[tid] * spartc[31 * order + (7 * delta + 2)]; } const int idx = tid - (block_size - order); if (idx >= 0) { fullcarry[chunk_id * order + idx] = val8; __threadfence(); if (idx == 0) { status[chunk_id] = 2; } } if (chunk_id > 0) { __syncthreads(); if (warp == 0) { const int cidm1 = chunk_id - 1; int flag = 1; do { if ((cidm1 - lane) >= 0) { flag = status[cidm1 - lane]; } } while ((__any(flag == 0)) || (__all(flag != 2))); int mask = __ballot(flag == 2); const int pos = __ffs(mask) - 1; T fc; if (lane < order) { fc = fullcarry[(cidm1 - pos) * order + lane]; } T X0 = __shfl(fc, 0); T X1 = __shfl(fc, 1); T X2 = __shfl(fc, 2); if (lane == 0) { sfullc[0] = X0; sfullc[1] = X1; sfullc[2] = X2; } } __syncthreads(); T X0 = sfullc[0]; val0 += sfacA[tid] * X0; T X1 = sfullc[1]; val0 += sfacB[tid] * X1; T X2 = sfullc[2]; val0 += sfacC[tid] * X2; } if (chunk_id == gridDim.x - 1) { if (offs + (0 * block_size) < items) output[offs + (0 * block_size)] = val0; if (offs + (1 * block_size) < items) output[offs + (1 * block_size)] = val1; if (offs + (2 * block_size) < items) output[offs + (2 * block_size)] = val2; if (offs + (3 * block_size) < items) output[offs + (3 * block_size)] = val3; if (offs + (4 * block_size) < items) output[offs + (4 * block_size)] = val4; if (offs + (5 * block_size) < items) output[offs + (5 * block_size)] = val5; if (offs + (6 * block_size) < items) output[offs + (6 * block_size)] = val6; if (offs + (7 * block_size) < items) output[offs + (7 * block_size)] = val7; if (offs + (8 * block_size) < items) output[offs + (8 * block_size)] = val8; } else { output[offs + (0 * block_size)] = val0; output[offs + (1 * block_size)] = val1; output[offs + (2 * block_size)] = val2; output[offs + (3 * block_size)] = val3; output[offs + (4 * block_size)] = val4; output[offs + (5 * block_size)] = val5; output[offs + (6 * block_size)] = val6; output[offs + (7 * block_size)] = val7; output[offs + (8 * block_size)] = val8; } } struct GPUTimer { cudaEvent_t beg, end; GPUTimer() {cudaEventCreate(&beg); cudaEventCreate(&end);} ~GPUTimer() {cudaEventDestroy(beg); cudaEventDestroy(end);} void start() {cudaEventRecord(beg, 0);} double stop() {cudaEventRecord(end, 0); cudaEventSynchronize(end); float ms; cudaEventElapsedTime(&ms, beg, end); return 0.001 * ms;} }; int main(int argc, char *argv[]) { printf("Parallel Linear Recurrence Computation\n"); printf("Copyright (c) 2018 Texas State University\n"); if (argc != 2) { fprintf(stderr, "USAGE: %s problem_size\n", argv[0]); return -1; } const int n = atoi(argv[1]); if (n < 1) {fprintf(stderr, "ERROR: problem_size must be at least 1\n"); return -1;}; int *d_status; T *h_in, *h_out, *h_sol, *d_in, *d_out, *d_partcarry, *d_fullcarry; const size_t size = n * sizeof(T); h_in = (T *)malloc(size); assert(h_in != NULL); h_out = (T *)malloc(size); assert(h_out != NULL); h_sol = (T *)malloc(size); assert(h_sol != NULL); for (int i = 0; i < n; i++) { h_in[i] = (i & 32) / 16 - 1; h_sol[i] = 0; } for (int i = 0; i < n; i++) { if ((i - 0) >= 0) { h_sol[i] += 4.035270e-02f * h_in[i - 0]; } } for (int i = 1; i < n; i++) { if ((i - 1) >= 0) { h_sol[i] += 2.132330e+00f * h_sol[i - 1]; } if ((i - 2) >= 0) { h_sol[i] += -1.573120e+00f * h_sol[i - 2]; } if ((i - 3) >= 0) { h_sol[i] += 4.004360e-01f * h_sol[i - 3]; } } cudaSetDevice(device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); const int SMs = deviceProp.multiProcessorCount; int 
valsperthread = 1; while ((valsperthread < 9) && (block_size * 2 * SMs * valsperthread < n)) { valsperthread++; } const int chunk_size = valsperthread * block_size; const int iterations = 5; assert(cudaSuccess == cudaMalloc(&d_in, size)); assert(cudaSuccess == cudaMalloc(&d_out, size)); assert(cudaSuccess == cudaMalloc(&d_status, (n + chunk_size - 1) / chunk_size * sizeof(int))); assert(cudaSuccess == cudaMalloc(&d_partcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T))); assert(cudaSuccess == cudaMalloc(&d_fullcarry, (n + chunk_size - 1) / chunk_size * order * sizeof(T))); assert(cudaSuccess == cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice)); assert(cudaSuccess == cudaMemcpy(d_out, d_in, size, cudaMemcpyDeviceToDevice)); cudaMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int)); switch (valsperthread) { case 1: Recurrence1<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 2: Recurrence2<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 3: Recurrence3<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 4: Recurrence4<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 5: Recurrence5<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 6: Recurrence6<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 7: Recurrence7<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 8: Recurrence8<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 9: Recurrence9<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; } GPUTimer timer; timer.start(); for (long i = 0; i < iterations; i++) { cudaMemset(d_status, 0, (n + chunk_size - 1) / chunk_size * sizeof(int)); switch (valsperthread) { case 1: Recurrence1<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 2: Recurrence2<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 3: Recurrence3<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 4: Recurrence4<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 5: Recurrence5<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 6: Recurrence6<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 7: Recurrence7<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 8: Recurrence8<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; case 9: Recurrence9<<<(n + chunk_size - 1) / chunk_size, block_size>>>(n, d_in, d_out, d_status, d_partcarry, d_fullcarry); break; } } double runtime = timer.stop() / iterations; double throughput = 0.000000001 * n / runtime; assert(cudaSuccess == cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost)); for 
(int i = 0; i < n; i++) { T s = h_sol[i]; T o = h_out[i]; if (fabsf(o - s) > 0.001) { printf("result not correct at index %d: %e != %e\n", i, h_sol[i], h_out[i]); return -1; } } printf("size = %d\tthroughput = %7.4f gigaitems/s\truntime = %7.4f s\tPassed!\n", n, throughput, runtime); printf("first elements of result are:\n"); for (int i = 0; (i < 8) && (i < n); i++) { printf(" %f", h_out[i]); } printf("\n"); free(h_in); free(h_out); free(h_sol); cudaFree(d_in); cudaFree(d_out); cudaFree(d_status); cudaFree(d_partcarry); cudaFree(d_fullcarry); return 0; }
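The generated Recurrence kernels above propagate a higher-order linear recurrence through each warp with unrolled shuffle steps: a lane reads a partial result from a lower lane, scales it by a precomputed factor, and conditionally adds it. As a minimal sketch of that pattern (not part of the benchmark above, and simplified to a first-order recurrence y[i] = a*y[i-1] + x[i] within a single warp, using the newer __shfl_up_sync intrinsic rather than the older __shfl calls the file uses):

// Illustrative sketch only: warp-level inclusive scan for the first-order
// recurrence y[i] = a*y[i-1] + x[i].  Launch with <<<1, 32>>>.
__global__ void warp_recurrence1(const float* x, float* y, float a)
{
    const int lane = threadIdx.x & 31;
    float val  = x[lane];   // running partial result for this lane
    float coef = a;         // a^d for the current offset d (d = 1, 2, 4, ...)
    for (int d = 1; d < 32; d *= 2) {
        // partial result of lane-d, shifted up the warp
        float up = __shfl_up_sync(0xffffffffu, val, d);
        if (lane >= d) val += coef * up;   // fold it in, scaled by a^d
        coef *= coef;                      // a^d -> a^(2d) for the next step
    }
    y[lane] = val;          // y[lane] = sum over k<=lane of a^(lane-k) * x[k]
}

The unrolled code above does the same doubling, with three coefficient tables (sfacA/sfacB/sfacC) because the recurrence is order 3, and then stitches warps and thread blocks together through shared memory and the fullcarry/status handshake.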
65b65801bf903ce676080b0716f56bccde4e2597.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.4.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       December 2013

       @generated c Tue Dec 17 13:18:45 2013
       @author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>

#define NB 64

/* =====================================================================
    Batches cgeadd of multiple arrays;
    y-dimension of grid is different arrays,
    x-dimension of grid is blocks for each array.
    Matrix is m x n, and is divided into block rows, each NB x n.
    Each CUDA block has NB threads to handle one block row.
    Each thread adds one row, iterating across all columns.
    The bottom block of rows may be partially outside the matrix;
    if so, rows outside the matrix (i >= m) are disabled.

    TODO. Block in both directions, for large matrices.
    E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
cgeadd_batched_kernel(
    int m, int n,
    magmaFloatComplex alpha,
    const magmaFloatComplex * const *dAarray, int ldda,
    magmaFloatComplex              **dBarray, int lddb )
{
    // dA and dB iterate across row i
    const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
    magmaFloatComplex       *dB = dBarray[ blockIdx.y ];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < m ) {
        dA += i;
        dB += i;
        const magmaFloatComplex *dAend = dA + n*ldda;
        while( dA < dAend ) {
            *dB = alpha*(*dA) + (*dB);
            dA += ldda;
            dB += lddb;
        }
    }
}

/* ===================================================================== */
extern "C" void
magmablas_cgeadd_batched(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex alpha,
    const magmaFloatComplex * const *dAarray, magma_int_t ldda,
    magmaFloatComplex              **dBarray, magma_int_t lddb,
    magma_int_t batchCount )
{
/*
    Purpose
    =======

    CGEADD adds two sets of matrices,
    dBarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1.

    Arguments
    =========

    M       (input) INTEGER
            The number of rows of each matrix dAarray[i].  M >= 0.

    N       (input) INTEGER
            The number of columns of each matrix dAarray[i].  N >= 0.

    ALPHA   (input) COMPLEX
            The scalar alpha.

    dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays,
            with each array a COMPLEX array, dimension (LDDA,N)
            The m by n matrices dAarray[i].

    LDDA    (input) INTEGER
            The leading dimension of each array dAarray[i].  LDDA >= max(1,M).

    dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays,
            with each array a COMPLEX array, dimension (LDDB,N)
            The m by n matrices dBarray[i].

    LDDB    (input) INTEGER
            The leading dimension of each array dBarray[i].  LDDB >= max(1,M).

    batchCount (input) INTEGER
            The number of matrices to add; length of dAarray and dBarray.
            batchCount >= 0.

    =====================================================================   */

    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    else if ( batchCount < 0 )
        info = -8;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    if ( m == 0 || n == 0 || batchCount == 0 )
        return;

    dim3 threads( NB );
    dim3 grid( (m + NB - 1)/NB, batchCount );

    hipLaunchKernelGGL(( cgeadd_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream ,
        m, n, alpha, dAarray, ldda, dBarray, lddb );
}
65b65801bf903ce676080b0716f56bccde4e2597.cu
/*
    -- MAGMA (version 1.4.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       December 2013

       @generated c Tue Dec 17 13:18:45 2013
       @author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>

#define NB 64

/* =====================================================================
    Batches cgeadd of multiple arrays;
    y-dimension of grid is different arrays,
    x-dimension of grid is blocks for each array.
    Matrix is m x n, and is divided into block rows, each NB x n.
    Each CUDA block has NB threads to handle one block row.
    Each thread adds one row, iterating across all columns.
    The bottom block of rows may be partially outside the matrix;
    if so, rows outside the matrix (i >= m) are disabled.

    TODO. Block in both directions, for large matrices.
    E.g., each block does 64x64 tile, instead of 64xN tile.
*/
__global__ void
cgeadd_batched_kernel(
    int m, int n,
    magmaFloatComplex alpha,
    const magmaFloatComplex * const *dAarray, int ldda,
    magmaFloatComplex              **dBarray, int lddb )
{
    // dA and dB iterate across row i
    const magmaFloatComplex *dA = dAarray[ blockIdx.y ];
    magmaFloatComplex       *dB = dBarray[ blockIdx.y ];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < m ) {
        dA += i;
        dB += i;
        const magmaFloatComplex *dAend = dA + n*ldda;
        while( dA < dAend ) {
            *dB = alpha*(*dA) + (*dB);
            dA += ldda;
            dB += lddb;
        }
    }
}

/* ===================================================================== */
extern "C" void
magmablas_cgeadd_batched(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex alpha,
    const magmaFloatComplex * const *dAarray, magma_int_t ldda,
    magmaFloatComplex              **dBarray, magma_int_t lddb,
    magma_int_t batchCount )
{
/*
    Purpose
    =======

    CGEADD adds two sets of matrices,
    dBarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1.

    Arguments
    =========

    M       (input) INTEGER
            The number of rows of each matrix dAarray[i].  M >= 0.

    N       (input) INTEGER
            The number of columns of each matrix dAarray[i].  N >= 0.

    ALPHA   (input) COMPLEX
            The scalar alpha.

    dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays,
            with each array a COMPLEX array, dimension (LDDA,N)
            The m by n matrices dAarray[i].

    LDDA    (input) INTEGER
            The leading dimension of each array dAarray[i].  LDDA >= max(1,M).

    dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays,
            with each array a COMPLEX array, dimension (LDDB,N)
            The m by n matrices dBarray[i].

    LDDB    (input) INTEGER
            The leading dimension of each array dBarray[i].  LDDB >= max(1,M).

    batchCount (input) INTEGER
            The number of matrices to add; length of dAarray and dBarray.
            batchCount >= 0.

    =====================================================================   */

    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    else if ( batchCount < 0 )
        info = -8;

    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }

    if ( m == 0 || n == 0 || batchCount == 0 )
        return;

    dim3 threads( NB );
    dim3 grid( (m + NB - 1)/NB, batchCount );

    cgeadd_batched_kernel<<< grid, threads, 0, magma_stream >>>(
        m, n, alpha, dAarray, ldda, dBarray, lddb );
}
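The batched interface above takes arrays of device pointers, one pointer per matrix, and those pointer arrays themselves must live on the GPU. A hypothetical host-side setup, not taken from MAGMA, might look like the sketch below; it assumes a CUDA build where magmaFloatComplex is cuFloatComplex, uses plain CUDA runtime allocation rather than MAGMA's own helpers, and omits error checking and cleanup.

// Hypothetical caller for magmablas_cgeadd_batched (illustrative only).
void example_cgeadd_batched(int m, int n, int batchCount)
{
    int ldda = m, lddb = m;
    // host-side arrays of device pointers
    magmaFloatComplex **hA = (magmaFloatComplex**) malloc(batchCount * sizeof(*hA));
    magmaFloatComplex **hB = (magmaFloatComplex**) malloc(batchCount * sizeof(*hB));
    for (int i = 0; i < batchCount; ++i) {
        cudaMalloc(&hA[i], (size_t)ldda * n * sizeof(magmaFloatComplex));  // i-th A matrix
        cudaMalloc(&hB[i], (size_t)lddb * n * sizeof(magmaFloatComplex));  // i-th B matrix
        // ... fill dA[i] and dB[i] here ...
    }
    // copy the pointer arrays themselves to the GPU
    magmaFloatComplex **dAarray, **dBarray;
    cudaMalloc(&dAarray, batchCount * sizeof(*dAarray));
    cudaMalloc(&dBarray, batchCount * sizeof(*dBarray));
    cudaMemcpy(dAarray, hA, batchCount * sizeof(*hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dBarray, hB, batchCount * sizeof(*hB), cudaMemcpyHostToDevice);

    magmaFloatComplex alpha = make_cuFloatComplex(2.0f, 0.0f);  // assumes cuFloatComplex typedef
    magmablas_cgeadd_batched(m, n, alpha,
                             (const magmaFloatComplex * const *) dAarray, ldda,
                             dBarray, lddb, batchCount);
    // cleanup (cudaFree of each matrix and the pointer arrays, free(hA), free(hB)) omitted
}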
4e442acd49320bf3ac81b8a72b1277b4e901892d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "defs_gpu.cuh" #include "time_stamp.h" texture<float, 1, hipReadModeElementType> texC; texture<float, 1, hipReadModeElementType> texA; texture<float, 1, hipReadModeElementType> texX; texture<float, 1, hipReadModeElementType> texY; texture<float, 1, hipReadModeElementType> texCos; texture<float, 2, hipReadModeElementType> texAR; __global__ void find_cart_new(float * pc_res_d, float *pc_a_d, float *pc_r_d, int cart_len){ int start = blockIdx.x*cart_len; for(int i=start;i<(start+cart_len);i++){ //pc_res_d[i]= tex2D(texPC,pc_a_d[i],pc_r_d[i]); pc_res_d[i]= tex2D(texAR,pc_a_d[i]+0.5f,pc_r_d[i]+0.5f); } } static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } __global__ void cuda_data_r2c(Complex *dev_inc, float *dev_in, unsigned int sp){ unsigned int i, j; i = blockIdx.x * blockDim.x + threadIdx.x; j = i + sp; dev_inc[i].x = dev_in[j]; dev_inc[i].y = 0.0f; } __global__ void cuda_data_c2r_3(float *dev_in, Complex *dev_inc, unsigned int shift, unsigned int nc, unsigned int nr, unsigned int nrow){ unsigned int i, i2, j; i = blockIdx.x * blockDim.x + threadIdx.x; i2 = i + shift; j = blockIdx.y * blockDim.y + threadIdx.y; dev_in[(nrow+j)*nr+i] = dev_inc[j*nc+i2].x; } __global__ void cuda_data_c2r(float *dev_in, Complex *dev_inc, unsigned int sp){ unsigned int i, j; i = blockIdx.x * blockDim.x + threadIdx.x; j = i + sp; dev_in[j] = dev_inc[i].x; } __global__ void cuda_mul_c(const Complex *dev_fc, Complex *dev_inc){ unsigned int i; Complex c; i = blockIdx.x * blockDim.x + threadIdx.x; c = ComplexMul(dev_fc[i],dev_inc[i]); dev_inc[i] = c; } __global__ void cprod(Complex* a, const Complex* b) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; a[i] = ComplexMul(a[i], b[i]); } __global__ void creal(const Complex* a, float* b) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; b[i] = a[i].x; } __global__ void cuda_filter_r2c(Complex *dev_fc, float *dev_fr, unsigned int nx){ unsigned int i, j, k; i = blockIdx.x * blockDim.x + threadIdx.x; j = i%nx; k = blockIdx.y * nx + i; dev_fc[k].x = dev_fr[j]; dev_fc[k].y = 0.0f; } __global__ void fbp_axial(float* dev_out, float da, unsigned int nx, unsigned int ny, float fnr){ unsigned int ixx, i; float r, s, sum; ixx = blockIdx.x*blockDim.x + threadIdx.x; r = float(ixx)-fnr; sum = 0.0f; for(i = 0; i<ny; i++){ s = fnr + r*cosf(da*float(i)); sum += tex1Dfetch(texA,s); } dev_out[ixx] = sum; } __global__ void set_zero(float *veco, unsigned int nxo){ unsigned int tig; tig = (blockIdx.y*blockDim.y + threadIdx.y)*nxo + blockIdx.x*blockDim.x + threadIdx.x; veco[tig] = 0.0f; } bool get_gpu_info(allData *aD){ /* aD->gi->major = 1; aD->gi->minor = 1; aD->gi->multiProcessorCount = 16; aD->gi->regsPerBlock = 8*1024; aD->gi->warpSize = 32; aD->gi->sharedMemPerBlock = 16*1024; // aD->gi->maxResidentThreads = prop.maxThreadsPerMultiProcessor;// aD->gi->maxThreadsPerBlock = 512; aD->gi->maxResidentBlocks = 8; if(aD->gi->major == 2){ aD->gi->maxResidentThreads = 1536;//new aD->gi->sharedMemBanks = 32; aD->gi->maxResidentWarps = 48; }else if(aD->gi->minor > 1){ aD->gi->maxResidentThreads = 1024;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 32; }else{ aD->gi->maxResidentThreads = 768;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 24; } */ /*aD->gi->major = 2; aD->gi->minor = 0; aD->gi->multiProcessorCount = 30; aD->gi->regsPerBlock = 32*1024; 
aD->gi->warpSize = 32; aD->gi->sharedMemPerBlock = 48*1024; // aD->gi->maxResidentThreads = prop.maxThreadsPerMultiProcessor;// aD->gi->maxThreadsPerBlock = 1024; aD->gi->maxResidentBlocks = 8; if(aD->gi->major == 2){ aD->gi->maxResidentThreads = 1536;//new aD->gi->sharedMemBanks = 32; aD->gi->maxResidentWarps = 48; }else if(aD->gi->minor > 1){ aD->gi->maxResidentThreads = 1024;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 32; }else{ aD->gi->maxResidentThreads = 768;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 24; }*/ hipError_t ce; int deviceCount; int MyDevice; printTagStart(aD,"GPUDevices"); ce = hipGetDeviceCount(&deviceCount); if(ce != hipSuccess){ sprintf(aD->message, "can not count GPUs: \"%s\"", hipGetErrorString(ce)); printError(aD); return false; }else{ printTag(aD,"NumberOfDevices",deviceCount); } if(deviceCount < 1 || deviceCount > 10){ printError(aD,"device count - wrong number"); return false; } printTag(aD,"RequestedIndexOfDevice",aD->hms->fbp->GPUDeviceNumber); hipGetDevice(&MyDevice); printTag(aD,"MyCudaDevice",MyDevice); hipDeviceProp_t prop; hipGetDeviceProperties(&prop,MyDevice); aD->gi->major = (unsigned int)prop.major; aD->gi->minor = (unsigned int)prop.minor; aD->gi->multiProcessorCount = (unsigned int)prop.multiProcessorCount; aD->gi->regsPerBlock = (unsigned int)prop.regsPerBlock; aD->gi->warpSize = (unsigned int)prop.warpSize; aD->gi->sharedMemPerBlock = (unsigned int)(prop.sharedMemPerBlock); // aD->gi->maxResidentThreads = prop.maxThreadsPerMultiProcessor;// aD->gi->maxThreadsPerBlock = (unsigned int)prop.maxThreadsPerBlock; aD->gi->maxResidentBlocks = 8; if(aD->gi->major == 2){ aD->gi->maxResidentThreads = 1536;//new aD->gi->sharedMemBanks = 32; aD->gi->maxResidentWarps = 48; }else if(aD->gi->minor > 1){ aD->gi->maxResidentThreads = 1024;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 32; }else{ aD->gi->maxResidentThreads = 768;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 24; } printTagStart(aD,"GPUDevice"); printTag(aD,"Name",prop.name); printTag(aD,"MultiProcessorCount",(unsigned int)prop.multiProcessorCount); printTag(aD,"ClockRate",prop.clockRate/1000,"in MHz"); printTag(aD,"TotalGlobalMemory",(unsigned int)(prop.totalGlobalMem),"bytes"); printTag(aD,"TotalGlobalMemory",(float)(float(prop.totalGlobalMem)/1073741824.0),"GB"); printTag(aD,"RevisionMajor",(unsigned int)prop.major); printTag(aD,"RevisionMinor",(unsigned int)prop.minor); sprintf(aD->message, "%i x %i x %i", (unsigned int)prop.maxGridSize[0], (unsigned int)prop.maxGridSize[1], (unsigned int)prop.maxGridSize[2]); printTag(aD,"MaximumGridSize",aD->message,"maximum size of a grid of thread blocks"); sprintf(aD->message, "%i x %i x %i", (unsigned int)prop.maxThreadsDim[0], (unsigned int)prop.maxThreadsDim[1], (unsigned int)prop.maxThreadsDim[2]); printTag(aD,"MaximumThreadSize",aD->message,"the maximum size of each dimension of a block"); printTag(aD,"WarpSize",(unsigned int)(prop.warpSize)); printTag(aD,"MaxWarpsPerMultiProcessor",(unsigned int)(aD->gi->maxResidentWarps),"maximum number of resident warps per multiprocessor"); printTag(aD,"MaxThreadsPerBlock",(unsigned int)(prop.maxThreadsPerBlock)); // printTag(aD,"MaxThreadsPerMultiProcessor",prop.maxThreadsPerMultiProcessor); printTag(aD,"MaxBlocksPerMultiProcessor",(unsigned int)(aD->gi->maxResidentBlocks),"maximum number of resident blocks per multiprocessor"); printTag(aD,"RegistersPerBlock",(unsigned int)(prop.regsPerBlock),"maximum number of 32-bit registers available to a thread 
block"); printTag(aD,"SharedMemoryPerBlock",(unsigned int)(prop.sharedMemPerBlock),"maximum amount of shared memory available to a thread block in bytes"); printTag(aD,"SharedMemoryBanks",(unsigned int)(aD->gi->sharedMemBanks),"number of shared memory banks"); // printTag(aD,"L2cache",prop.l2CacheSize,"bytes"); printTagEnd(aD);//GPUDevice printTagEnd(aD);//GPUDevices return true; } bool fbp_axial_cuda(allData *aD){ gpu_info *gi; xData *xd; unsigned int mb, mt, nx, ny; unsigned int uu, ub, maxt; float fnr, da; size_t mem_size, mem_size_c; float *dev_in, *dev_out; hipfftComplex *dev_filter, *dev_data; hipfftHandle plan; timestamp("Starting fbp_axial_cuda",4); xd = aD->data; gi = aD->gi; nx = aD->ta_nx; ny = aD->ny; mb = gi->maxResidentBlocks; mt = gi->maxResidentThreads; maxt = mt/mb; uu = 1; while(maxt>1){ uu*=2; maxt/=2; } maxt = uu; ub = nx/maxt; da = aD->gi->rotAngleStep; fnr = 0.5f*float(nx-1); dim3 grids_p(ub,1,1); dim3 threads_p(maxt,1,1); mem_size = nx*sizeof(float); mem_size_c = nx*sizeof(hipfftComplex); timestamp("calling cuda to automatically get a device",4); hipFree(NULL); // hipSetDevice(aD->hms->fbp->GPUDeviceNumber); { int MyDevice; char devmsg[128]; hipGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } timestamp("calling hipMalloc",4); CUDA_SAFE_CALL(hipMalloc((void**)&dev_filter,mem_size_c)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_data,mem_size_c)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_in,mem_size)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_out,mem_size)); timestamp("finished hipMalloc",4); CUDA_SAFE_CALL(hipMemcpy(dev_filter,xd->veccF,mem_size_c,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_data,xd->veccI,mem_size_c,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipDeviceSynchronize()); HMFFT_SAFE_CALL(hipfftPlan1d(&plan, nx, HIPFFT_C2C, 1)); HMFFT_SAFE_CALL(hipfftExecC2C(plan, dev_data, dev_data, HIPFFT_FORWARD)); CUDA_SAFE_CALL(hipDeviceSynchronize()); hipLaunchKernelGGL(( cprod), dim3(grids_p),dim3(threads_p), 0, 0, dev_data,dev_filter); CUDA_SAFE_CALL(hipDeviceSynchronize()); HMFFT_SAFE_CALL(hipfftExecC2C(plan, dev_data, dev_data, HIPFFT_BACKWARD)); CUDA_SAFE_CALL(hipDeviceSynchronize()); hipLaunchKernelGGL(( creal), dim3(grids_p),dim3(threads_p), 0, 0, dev_data,dev_in); CUDA_SAFE_CALL(hipDeviceSynchronize()); hipfftDestroy(plan); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); texA.normalized = false; texA.filterMode = hipFilterModeLinear; texA.addressMode[0] = hipAddressModeClamp; CUDA_SAFE_CALL(hipBindTexture(NULL, texA, dev_in, channelDesc, mem_size)); hipLaunchKernelGGL(( fbp_axial), dim3(grids_p), dim3(threads_p), 0, 0, dev_out, da, nx, ny, fnr); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUDA_SAFE_CALL(hipMemcpy(xd->vecto, dev_out, mem_size, hipMemcpyDeviceToHost)); hipFree(dev_in); dev_in = NULL; hipFree(dev_out); dev_out = NULL; hipFree(dev_data); dev_data = NULL; hipFree(dev_filter); dev_filter = NULL; timestamp("Finishing fbp_axial_cuda",4); return true; } __global__ void fbp_std2(float* temp_dev, unsigned int nxo, unsigned int mw, float xc, float *dev_CS, unsigned int vH, unsigned int chnum){ extern __shared__ float shm[]; float2 *vcs = (float2 *)&shm[0]; float sum, p, fitt; float x, y; unsigned int ixx, iyy, itt, j, tib, tpb; tib = threadIdx.y * blockDim.x + threadIdx.x; tpb = blockDim.x * blockDim.y; ixx = blockIdx.x*blockDim.x + threadIdx.x; iyy = blockIdx.y*blockDim.y + threadIdx.y; itt = ixx+iyy*nxo; fitt = float(itt); x = tex1Dfetch(texX,fitt); y = tex1Dfetch(texY,fitt); sum = 0.0f; shm[tib] = 
dev_CS[tib+chnum*tpb]; __syncthreads(); for(j=0; j<vH; j++){ p = xc + x*vcs[j].x+ y*vcs[j].y + float(j*mw); sum += (tex1Dfetch(texA,p)); } temp_dev[itt] += sum; } bool fbp_cuda(allData *aD){ xData *xd = aD->data; size_t mem_CS; size_t mem_out, mem_in; size_t mem_chunk; size_t mem_comp; size_t mem_shared; float *dev_in, *dev_out; float *dev_X, *dev_Y; float *dev_CS; unsigned int hTimer; unsigned int nx, ny, nxo, nyo, wo, ho; unsigned int cl, cw; unsigned int vH, sp; hipfftComplex *dev_fc, *dev_inc; hipfftHandle plan; double gpuTime; xFBP_gpu *fg; timestamp("starting fbp_cuda",4); fg = aD->fg; nx = fg->nx; ny = fg->ny; nxo = fg->nxo; nyo = fg->nyo; wo = fg->blockWidth; ho = fg->blockHeight; cl = fg->chunkLeft; cw = fg->chunkWidth; vH = fg->vH; mem_shared = wo*ho*sizeof(float); mem_comp = nx*vH*sizeof(hipfftComplex); mem_chunk = cw*vH*sizeof(float); mem_CS = 2*ny*sizeof(float); mem_out = nxo*nyo*sizeof(float); mem_in = nx*ny*sizeof(float); dim3 grids_in(nx*vH/(wo*ho),1); dim3 threads_in(wo*ho,1); dim3 grids_ch(cw/wo,vH/ho,1); dim3 threads_ch(wo,ho,1); dim3 grids_bp(nxo/wo,nyo/ho,1); dim3 threads_bp(wo,ho,1); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); CUT_SAFE_CALL( cutResetTimer(hTimer)); CUT_SAFE_CALL( cutStartTimer(hTimer)); timestamp("calling hipMalloc",4); { int MyDevice; char devmsg[128]; hipGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } CUDA_SAFE_CALL(hipMalloc((void**)&dev_fc,mem_comp)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_inc,mem_comp)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_in,mem_in)); timestamp("finished hipMalloc",4); CUDA_SAFE_CALL(hipMemcpy(dev_fc,xd->veccF,mem_comp,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_in,xd->vta,mem_in,hipMemcpyHostToDevice)); CUFFT_SAFE_CALL(hipfftPlan1d(&plan, nx, HIPFFT_C2C, vH)); texA.normalized = false; texA.filterMode = hipFilterModeLinear; texA.addressMode[0] = hipAddressModeClamp; texX.normalized = false; texX.filterMode = hipFilterModeLinear; texX.addressMode[0] = hipAddressModeClamp; texY.normalized = false; texY.filterMode = hipFilterModeLinear; texY.addressMode[0] = hipAddressModeClamp; timestamp("calling hipMalloc",4); CUDA_SAFE_CALL(hipMalloc((void**)&dev_CS,mem_CS)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_out,mem_out)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_X,mem_out)); CUDA_SAFE_CALL(hipMalloc((void**)&dev_Y,mem_out)); timestamp("finished hipMalloc",4); CUDA_SAFE_CALL(hipMemcpy(dev_X,xd->vecX,mem_out,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_Y,xd->vecY,mem_out,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(dev_CS,xd->vecCS,mem_CS,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipBindTexture(NULL, texA, dev_in, channelDesc, mem_chunk)); CUDA_SAFE_CALL(hipBindTexture(NULL, texX, dev_X, channelDesc, mem_out)); CUDA_SAFE_CALL(hipBindTexture(NULL, texY, dev_Y, channelDesc, mem_out)); hipLaunchKernelGGL(( set_zero), dim3(grids_bp), dim3(threads_bp), 0, 0, dev_out, nxo); CUDA_SAFE_CALL( hipDeviceSynchronize() ); for(unsigned int i=0;i<fg->numChunks; i++){ sp = i * nx * vH; hipLaunchKernelGGL(( cuda_data_r2c), dim3(grids_in),dim3(threads_in), 0, 0, dev_inc,dev_in,sp); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUFFT_SAFE_CALL(hipfftExecC2C(plan, dev_inc, dev_inc, HIPFFT_FORWARD)); CUDA_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( cuda_mul_c), dim3(grids_in),dim3(threads_in), 0, 0, dev_fc,dev_inc); CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUFFT_SAFE_CALL(hipfftExecC2C(plan, dev_inc, dev_inc, 
HIPFFT_BACKWARD)); CUDA_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( cuda_data_c2r_3), dim3(grids_ch),dim3(threads_ch), 0, 0, dev_in, dev_inc, cl, nx, cw, 0); CUDA_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( fbp_std2), dim3(grids_bp), dim3(threads_bp), mem_shared, 0, dev_out, nxo, cw, fg->xc, dev_CS, vH, i); CUDA_SAFE_CALL( hipDeviceSynchronize() ); } hipfftDestroy(plan); hipFree(dev_inc); dev_inc = NULL; hipFree(dev_fc); dev_fc = NULL; CUDA_SAFE_CALL(hipMemcpy(xd->vto,dev_out,mem_out,hipMemcpyDeviceToHost)); hipFree(dev_in); dev_in = NULL; hipFree(dev_out); dev_out = NULL; hipFree(dev_CS); dev_CS = NULL; hipFree(dev_X); dev_X = NULL; hipFree(dev_Y); dev_Y = NULL; CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printTag(aD,"BackprojectionTime",(float)gpuTime,"in ms"); timestamp("finished fbp_cuda",4); return true; } __global__ void xb_proj_new(unsigned int indexs, unsigned int starty, float *vif, float *vecxy, float *vec_sc, float xc, unsigned int vH){ unsigned int ij, ijk, tib, i; float s; float2 cs, xy; tib = (blockDim.x*threadIdx.y + threadIdx.x)/2; cs = *(float2 *)(&vec_sc[2*(tib+starty)]); ij = 2*blockIdx.x + indexs; for(i = 0; i<2; i++){ xy = *(float2 *)(&vecxy[2*ij]); ijk = ij*vH+tib; s = __fmaf_rn(xy.x,cs.x,xc); vif[ijk] = __fmaf_rn(xy.y,cs.y,s); ij++; } } __global__ void xb_proj2_new(unsigned int starty, float *vec_x, float *vecxy, float *vec_cs){ unsigned int ij, ijk, tib, i, tpb; float s; float2 cs, xy; tpb = blockDim.x * blockDim.y; tib = (blockDim.x*threadIdx.y + threadIdx.x)/2; cs = *(float2 *)(&vec_cs[2*(tib+starty)]); ij = 2*blockIdx.x; for(i = 0; i<2; i++){ xy = *(float2 *)(&vecxy[2*ij]); ijk = tib*tpb+ij; s = __fmul_rn(xy.x,cs.x); vec_x[ijk] = __fmaf_rn(xy.y,cs.y,s); ij++; } } __global__ void fbp_std5(float *vb, float *veco, float *vt, unsigned int nx, unsigned int vH, unsigned int nxo, unsigned int nxb){ float rr, sum; unsigned int ixq, ivc, ic; unsigned int tib, tig, big, tpb; tib = blockDim.x*threadIdx.y + threadIdx.x; big = blockIdx.y*nxb + blockIdx.x; tig = (blockIdx.y*blockDim.y + threadIdx.y)*nxo + blockIdx.x*blockDim.x + threadIdx.x; tpb = blockDim.x * blockDim.y; ixq = tib; ivc = big*vH; sum = 0.0f; for(ic = 0; ic<vH; ic++){ rr = __fadd_rn(vt[ixq],vb[ivc+ic]); sum += tex1Dfetch(texA,rr+float(ic*nx)); ixq += tpb; } veco[tig] += sum; } bool fbp_cuda_20(allData *aD){ xData *xd = aD->data; unsigned int nx, ny, nxo, nyo; unsigned int wo, ho; unsigned int nxb, nyb; unsigned int mt, mb; unsigned int uu, ub; unsigned int maxt; unsigned int vH; unsigned int nf, sp; unsigned int starty, indexs; unsigned int nxyb, nlen, nxyb2; unsigned int hTimer; size_t mem_fc, mem_chunk; size_t mem_in, mem_out; size_t mem_CS, mem_block_xy; size_t mem_thread_x, mem_thread_xy, mem_block; float xc; float *dev_thread_x, *dev_thread_xy, *dev_block; float *dev_in, *dev_out; float *dev_block_xy; float *dev_CS; gpu_info *gi; double gpuTime; hipfftComplex *dev_fc, *dev_inc; hipfftHandle plan; timestamp("starting fbp_cuda_20",4); gi = aD->gi; xc = aD->new_xc; vH = gi->vertH; wo = gi->wo; ho = gi->ho; nx = aD->ta_nx; ny = aD->ta_ny; nxo = gi->mxo; nyo = gi->myo; nxb = nxo/wo; nyb = nyo/ho; mem_block = nxb*nyb*vH*sizeof(float); mem_thread_x = wo*ho*vH*sizeof(float); mem_thread_xy = 2*wo*ho*sizeof(float); mem_block_xy = 2 * nxb * nyb * sizeof(float); mem_in = nx * ny *sizeof(float); mem_out = nxo * nyo *sizeof(float); mem_CS = 2*ny*sizeof(float); mem_fc = nx * vH * sizeof(hipfftComplex); mem_chunk = nx * vH * sizeof(float); mb = 
gi->maxResidentBlocks; mt = gi->maxResidentThreads; maxt = mt/mb; uu = 1; while(maxt>1){ uu*=2; maxt/=2; } maxt = uu; ub = nx/maxt; nf = ny/vH; nxyb = nxb*nyb; nlen = (nxyb/4); if(nxyb%4 > 0) nlen++; nxyb2 = nxyb-2*nlen; dim3 threads_xb(1,1,1); dim3 threads_1(maxt,1,1); dim3 threads_bp(wo,ho,1); dim3 grid_xb2(vH,1,1); dim3 grid_xb_new(nlen,1,1); dim3 grids_2(ub*vH,1,1); dim3 grid_bp(nxb,nyb,1); HMCUT_SAFE_CALL( cutCreateTimer(&hTimer) ); HMCUT_SAFE_CALL( cutResetTimer(hTimer)); HMCUT_SAFE_CALL( cutStartTimer(hTimer)); timestamp("calling hipMalloc ",4); { int MyDevice; char devmsg[128]; hipGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } HM_SAFE_CALL(hipMalloc((void**)&dev_block,mem_block)); HM_SAFE_CALL(hipMalloc((void**)&dev_thread_x,mem_thread_x)); HM_SAFE_CALL(hipMalloc((void**)&dev_thread_xy,mem_thread_xy)); HM_SAFE_CALL(hipMalloc((void**)&dev_fc,mem_fc)); HM_SAFE_CALL(hipMalloc((void**)&dev_inc,mem_fc)); HM_SAFE_CALL(hipMalloc((void**)&dev_in,mem_in)); HM_SAFE_CALL(hipMalloc((void**)&dev_out,mem_out)); HM_SAFE_CALL(hipMalloc((void**)&dev_block_xy,mem_block_xy)); HM_SAFE_CALL(hipMalloc((void**)&dev_CS,mem_CS)); timestamp("finished hipMalloc ",4); HM_SAFE_CALL(hipMemcpy(dev_fc,xd->veccF,mem_fc,hipMemcpyHostToDevice)); HM_SAFE_CALL(hipMemcpy(dev_in,xd->vta,mem_in,hipMemcpyHostToDevice)); HM_SAFE_CALL(hipMemcpy(dev_block_xy,xd->vecbXY,mem_block_xy,hipMemcpyHostToDevice)); HM_SAFE_CALL(hipMemcpy(dev_thread_xy,xd->vecXY_block,mem_thread_xy,hipMemcpyHostToDevice)); HM_SAFE_CALL(hipMemcpy(dev_CS,xd->vecCS,mem_CS,hipMemcpyHostToDevice)); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); texA.normalized = false; texA.filterMode = hipFilterModeLinear; texA.addressMode[0] = hipAddressModeClamp; CUDA_SAFE_CALL(hipBindTexture(NULL, texA, dev_in, channelDesc, mem_chunk)); /*rca: AARGH -- plan is done for each slice -- this defeats the purpose of planning the FFT */ timestamp("before plan creation",4); HMFFT_SAFE_CALL(hipfftPlan1d(&plan, nx, HIPFFT_C2C, vH)); timestamp("after plan creation",4); hipLaunchKernelGGL(( set_zero), dim3(grid_bp), dim3(threads_bp), 0, 0, dev_out, nxo); HM_SAFE_CALL( hipDeviceSynchronize() ); timestamp("before backprojection loop ",4); for(unsigned int i = 0;i<nf;i++){ sp = i*nx*vH; hipLaunchKernelGGL(( cuda_data_r2c), dim3(grids_2),dim3(threads_1), 0, 0, dev_inc,dev_in,sp); HM_SAFE_CALL( hipDeviceSynchronize() ); HMFFT_SAFE_CALL(hipfftExecC2C(plan, dev_inc, dev_inc, HIPFFT_FORWARD)); HM_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( cuda_mul_c), dim3(grids_2),dim3(threads_1), 0, 0, dev_fc,dev_inc); HM_SAFE_CALL( hipDeviceSynchronize() ); HMFFT_SAFE_CALL(hipfftExecC2C(plan, dev_inc, dev_inc, HIPFFT_BACKWARD)); HM_SAFE_CALL( hipDeviceSynchronize() ); sp = 0; hipLaunchKernelGGL(( cuda_data_c2r), dim3(grids_2),dim3(threads_1), 0, 0, dev_in,dev_inc,sp); HM_SAFE_CALL( hipDeviceSynchronize() ); starty = i*vH; indexs = 0; hipLaunchKernelGGL(( xb_proj_new), dim3(grid_xb_new), dim3(threads_bp), 0, 0, indexs, starty, dev_block, dev_block_xy, dev_CS, xc, vH); HM_SAFE_CALL( hipDeviceSynchronize() ); indexs = nxyb2; hipLaunchKernelGGL(( xb_proj_new), dim3(grid_xb_new), dim3(threads_bp), 0, 0, indexs, starty, dev_block, dev_block_xy, dev_CS, xc, vH); HM_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( xb_proj2_new), dim3(grid_xb2), dim3(threads_bp), 0, 0, starty, dev_thread_x, dev_thread_xy, dev_CS); HM_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( fbp_std5), dim3(grid_bp), dim3(threads_bp), 0, 0, 
dev_block, dev_out, dev_thread_x, nx, vH, nxo, nxb); HM_SAFE_CALL( hipDeviceSynchronize() ); } timestamp("after backprojection loop ",4); hipfftDestroy(plan); timestamp("after plan destruction",4); HM_SAFE_CALL(hipMemcpy(xd->vto, dev_out, mem_out, hipMemcpyDeviceToHost)); hipFree(dev_fc); dev_fc = NULL; hipFree(dev_inc); dev_inc = NULL; hipFree(dev_in); dev_in = NULL; hipFree(dev_block); dev_block = NULL; hipFree(dev_thread_x); dev_thread_x = NULL; hipFree(dev_thread_xy); dev_thread_xy = NULL; hipFree(dev_out); dev_out = NULL; hipFree(dev_CS); dev_CS = NULL; hipFree(dev_block_xy); dev_block_xy = NULL; HMCUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printTag(aD,"TimeBackprojection",float(gpuTime),"time (in ms)"); timestamp("finished fbp_cuda_20",4); return true; } __global__ void fbp_cpu2(float *dev_in, float *dev_pol, float *dev_Cos, unsigned int starty, unsigned int nx, unsigned int na, unsigned int vH, unsigned int pola, float ps, float xc_new){ float x, xx, sum; unsigned int ic, irr; unsigned int tib, tig, tpb; int iaa, aa, ia, dr, dm; tib = blockDim.x*threadIdx.y + threadIdx.x; iaa = blockIdx.x*blockDim.x + threadIdx.x; irr = blockIdx.y*blockDim.y + threadIdx.y; tig = irr*pola + iaa; tpb = blockDim.x * blockDim.y; x = ps * float(irr); sum = 0.0f; float t; ia = starty + iaa; dm = ia/na; dr = ia - dm * na; if(dm%2 == 0){ t= 1.0f; }else{ t= -1.0f; } for(ic = 0; ic<vH; ic++){ if(dr == na){ dr = 0; t*= -1.0f; } xx = xc_new + t* x * tex1Dfetch(texCos, dr); sum += tex1Dfetch(texA,xx + float(ic*nx)); dr++; } dev_pol[tig] += sum; } bool fbp_cuda_cpu2(allData *aD){ xData *xd = aD->data; unsigned int nx, ny, polr, pola, nba, nbr; unsigned int wo, ho; unsigned int mt, mb; unsigned int uu, ub; unsigned int maxt; unsigned int vH; unsigned int nf, sp, na; unsigned int starty, indexs; unsigned int hTimer; size_t mem_fc, mem_chunk; size_t mem_in; size_t mem_Cos, mem_pol; float xc, ps; float *dev_in, *dev_pol; float *dev_Cos; gpu_info *gi; double gpuTime; timestamp("calling fbp_cuda_cpu2",4); gi = aD->gi; pola = gi->pol_a; polr = gi->pol_r; ps = gi->outputPixelSize; hipfftComplex *dev_fc, *dev_inc; hipfftHandle plan; xc = aD->new_xc; vH = gi->vertH; wo = gi->wo; ho = gi->ho; nx = aD->ta_nx; ny = aD->ta_ny; na = aD->ta_ny_13; nba = pola/wo; nbr = polr/ho; printf("pola: %i, polr: %i\n",pola, polr); mem_in = nx * ny *sizeof(float); mem_pol = pola *polr *sizeof(float); mem_Cos = na*sizeof(float); mem_fc = nx * vH * sizeof(hipfftComplex); mem_chunk = nx * vH * sizeof(float); mb = gi->maxResidentBlocks; mt = gi->maxResidentThreads; maxt = mt/mb; uu = 1; while(maxt>1){ uu*=2; maxt/=2; } maxt = uu; ub = nx/maxt; nf = ny/vH; dim3 threads_1(maxt,1,1); dim3 threads_bp(wo,ho,1); dim3 grids_2(ub*vH,1,1); dim3 grid_bp(nba,nbr,1); HMCUT_SAFE_CALL( cutCreateTimer(&hTimer) ); HMCUT_SAFE_CALL( cutResetTimer(hTimer)); HMCUT_SAFE_CALL( cutStartTimer(hTimer)); { int MyDevice; char devmsg[128]; hipGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } HM_SAFE_CALL(hipMalloc((void**)&dev_fc,mem_fc)); HM_SAFE_CALL(hipMalloc((void**)&dev_inc,mem_fc)); HM_SAFE_CALL(hipMalloc((void**)&dev_in,mem_in)); HM_SAFE_CALL(hipMalloc((void**)&dev_pol,mem_pol)); HM_SAFE_CALL(hipMalloc((void**)&dev_Cos,mem_Cos)); HM_SAFE_CALL(hipMemcpy(dev_fc,xd->veccF,mem_fc,hipMemcpyHostToDevice)); HM_SAFE_CALL(hipMemcpy(dev_in,xd->vta,mem_in,hipMemcpyHostToDevice)); HM_SAFE_CALL(hipMemcpy(dev_Cos,xd->vecCos,mem_Cos,hipMemcpyHostToDevice)); hipChannelFormatDesc channelDesc = 
hipCreateChannelDesc<float>(); texA.normalized = false; texA.filterMode = hipFilterModeLinear; texA.addressMode[0] = hipAddressModeClamp; texCos.normalized = false; texCos.filterMode = hipFilterModeLinear; texCos.addressMode[0] = hipAddressModeClamp; CUDA_SAFE_CALL(hipBindTexture(NULL, texA, dev_in, channelDesc, mem_chunk)); CUDA_SAFE_CALL(hipBindTexture(NULL, texCos, dev_Cos, channelDesc, mem_Cos)); HMFFT_SAFE_CALL(hipfftPlan1d(&plan, nx, HIPFFT_C2C, vH)); hipLaunchKernelGGL(( set_zero), dim3(grid_bp), dim3(threads_bp), 0, 0, dev_pol, nba); HM_SAFE_CALL( hipDeviceSynchronize() ); for(unsigned int i = 0;i<nf;i++){ //for(unsigned int i = 0;i<1;i++){ sp = i*nx*vH; hipLaunchKernelGGL(( cuda_data_r2c), dim3(grids_2),dim3(threads_1), 0, 0, dev_inc,dev_in,sp); HM_SAFE_CALL( hipDeviceSynchronize() ); HMFFT_SAFE_CALL(hipfftExecC2C(plan, dev_inc, dev_inc, HIPFFT_FORWARD)); HM_SAFE_CALL( hipDeviceSynchronize() ); hipLaunchKernelGGL(( cuda_mul_c), dim3(grids_2),dim3(threads_1), 0, 0, dev_fc,dev_inc); HM_SAFE_CALL( hipDeviceSynchronize() ); HMFFT_SAFE_CALL(hipfftExecC2C(plan, dev_inc, dev_inc, HIPFFT_BACKWARD)); HM_SAFE_CALL( hipDeviceSynchronize() ); sp = 0; hipLaunchKernelGGL(( cuda_data_c2r), dim3(grids_2),dim3(threads_1), 0, 0, dev_in,dev_inc,sp); HM_SAFE_CALL( hipDeviceSynchronize() ); starty = i*vH; hipLaunchKernelGGL(( fbp_cpu2), dim3(grid_bp), dim3(threads_bp), 0, 0, dev_in, dev_pol, dev_Cos, starty, nx, na, vH, pola, ps, xc); HM_SAFE_CALL( hipDeviceSynchronize() ); } hipfftDestroy(plan); HM_SAFE_CALL(hipMemcpy(xd->vecPol, dev_pol, mem_pol, hipMemcpyDeviceToHost)); hipFree(dev_fc); dev_fc = NULL; hipFree(dev_inc); dev_inc = NULL; hipFree(dev_in); dev_in = NULL; hipFree(dev_pol); dev_pol = NULL; hipFree(dev_Cos); dev_Cos = NULL; HMCUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printTag(aD,"TimeBackprojection",float(gpuTime),"time (in ms)"); return true; }
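Each of the fbp_cuda* routines above filters the projection data in the frequency domain before backprojecting: forward C2C FFT of a batch of rows, pointwise complex multiply by a precomputed filter, inverse FFT back to real space. The following is a minimal sketch of that filtering step for a single row, not taken from the file; it uses the CUDA/cuFFT names (the .hip variant above calls the hipfft equivalents) and omits error checking.

// Illustrative sketch of the FFT-filter-IFFT step used before backprojection.
#include <cufft.h>
#include <cuComplex.h>

__global__ void pointwise_mul(cufftComplex* d, const cufftComplex* f, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        cufftComplex a = d[i], b = f[i];
        // complex multiply, same as the ComplexMul/cuda_mul_c kernels above
        d[i] = make_cuFloatComplex(a.x*b.x - a.y*b.y, a.x*b.y + a.y*b.x);
    }
}

void filter_row(cufftComplex* d_row, const cufftComplex* d_filter, int nx)
{
    cufftHandle plan;
    cufftPlan1d(&plan, nx, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_row, d_row, CUFFT_FORWARD);              // to frequency domain
    pointwise_mul<<<(nx + 255) / 256, 256>>>(d_row, d_filter, nx); // apply the filter
    cufftExecC2C(plan, d_row, d_row, CUFFT_INVERSE);              // back to spatial domain
    cufftDestroy(plan);
    // cuFFT's inverse transform is unnormalized; scale by 1/nx if a normalized result is needed
}

As the "rca: AARGH" comment in fbp_cuda_20 points out, the plan should really be created once and reused across slices rather than rebuilt on every call; this sketch keeps the per-call plan only for brevity.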
4e442acd49320bf3ac81b8a72b1277b4e901892d.cu
#include "defs_gpu.cuh" #include "time_stamp.h" texture<float, 1, cudaReadModeElementType> texC; texture<float, 1, cudaReadModeElementType> texA; texture<float, 1, cudaReadModeElementType> texX; texture<float, 1, cudaReadModeElementType> texY; texture<float, 1, cudaReadModeElementType> texCos; texture<float, 2, cudaReadModeElementType> texAR; __global__ void find_cart_new(float * pc_res_d, float *pc_a_d, float *pc_r_d, int cart_len){ int start = blockIdx.x*cart_len; for(int i=start;i<(start+cart_len);i++){ //pc_res_d[i]= tex2D(texPC,pc_a_d[i],pc_r_d[i]); pc_res_d[i]= tex2D(texAR,pc_a_d[i]+0.5f,pc_r_d[i]+0.5f); } } static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } __global__ void cuda_data_r2c(Complex *dev_inc, float *dev_in, unsigned int sp){ unsigned int i, j; i = blockIdx.x * blockDim.x + threadIdx.x; j = i + sp; dev_inc[i].x = dev_in[j]; dev_inc[i].y = 0.0f; } __global__ void cuda_data_c2r_3(float *dev_in, Complex *dev_inc, unsigned int shift, unsigned int nc, unsigned int nr, unsigned int nrow){ unsigned int i, i2, j; i = blockIdx.x * blockDim.x + threadIdx.x; i2 = i + shift; j = blockIdx.y * blockDim.y + threadIdx.y; dev_in[(nrow+j)*nr+i] = dev_inc[j*nc+i2].x; } __global__ void cuda_data_c2r(float *dev_in, Complex *dev_inc, unsigned int sp){ unsigned int i, j; i = blockIdx.x * blockDim.x + threadIdx.x; j = i + sp; dev_in[j] = dev_inc[i].x; } __global__ void cuda_mul_c(const Complex *dev_fc, Complex *dev_inc){ unsigned int i; Complex c; i = blockIdx.x * blockDim.x + threadIdx.x; c = ComplexMul(dev_fc[i],dev_inc[i]); dev_inc[i] = c; } __global__ void cprod(Complex* a, const Complex* b) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; a[i] = ComplexMul(a[i], b[i]); } __global__ void creal(const Complex* a, float* b) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; b[i] = a[i].x; } __global__ void cuda_filter_r2c(Complex *dev_fc, float *dev_fr, unsigned int nx){ unsigned int i, j, k; i = blockIdx.x * blockDim.x + threadIdx.x; j = i%nx; k = blockIdx.y * nx + i; dev_fc[k].x = dev_fr[j]; dev_fc[k].y = 0.0f; } __global__ void fbp_axial(float* dev_out, float da, unsigned int nx, unsigned int ny, float fnr){ unsigned int ixx, i; float r, s, sum; ixx = blockIdx.x*blockDim.x + threadIdx.x; r = float(ixx)-fnr; sum = 0.0f; for(i = 0; i<ny; i++){ s = fnr + r*cosf(da*float(i)); sum += tex1Dfetch(texA,s); } dev_out[ixx] = sum; } __global__ void set_zero(float *veco, unsigned int nxo){ unsigned int tig; tig = (blockIdx.y*blockDim.y + threadIdx.y)*nxo + blockIdx.x*blockDim.x + threadIdx.x; veco[tig] = 0.0f; } bool get_gpu_info(allData *aD){ /* aD->gi->major = 1; aD->gi->minor = 1; aD->gi->multiProcessorCount = 16; aD->gi->regsPerBlock = 8*1024; aD->gi->warpSize = 32; aD->gi->sharedMemPerBlock = 16*1024; // aD->gi->maxResidentThreads = prop.maxThreadsPerMultiProcessor;// aD->gi->maxThreadsPerBlock = 512; aD->gi->maxResidentBlocks = 8; if(aD->gi->major == 2){ aD->gi->maxResidentThreads = 1536;//new aD->gi->sharedMemBanks = 32; aD->gi->maxResidentWarps = 48; }else if(aD->gi->minor > 1){ aD->gi->maxResidentThreads = 1024;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 32; }else{ aD->gi->maxResidentThreads = 768;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 24; } */ /*aD->gi->major = 2; aD->gi->minor = 0; aD->gi->multiProcessorCount = 30; aD->gi->regsPerBlock = 32*1024; aD->gi->warpSize = 32; aD->gi->sharedMemPerBlock = 48*1024; // 
aD->gi->maxResidentThreads = prop.maxThreadsPerMultiProcessor;// aD->gi->maxThreadsPerBlock = 1024; aD->gi->maxResidentBlocks = 8; if(aD->gi->major == 2){ aD->gi->maxResidentThreads = 1536;//new aD->gi->sharedMemBanks = 32; aD->gi->maxResidentWarps = 48; }else if(aD->gi->minor > 1){ aD->gi->maxResidentThreads = 1024;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 32; }else{ aD->gi->maxResidentThreads = 768;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 24; }*/ cudaError_t ce; int deviceCount; int MyDevice; printTagStart(aD,"GPUDevices"); ce = cudaGetDeviceCount(&deviceCount); if(ce != cudaSuccess){ sprintf(aD->message, "can not count GPUs: \"%s\"", cudaGetErrorString(ce)); printError(aD); return false; }else{ printTag(aD,"NumberOfDevices",deviceCount); } if(deviceCount < 1 || deviceCount > 10){ printError(aD,"device count - wrong number"); return false; } printTag(aD,"RequestedIndexOfDevice",aD->hms->fbp->GPUDeviceNumber); cudaGetDevice(&MyDevice); printTag(aD,"MyCudaDevice",MyDevice); cudaDeviceProp prop; cudaGetDeviceProperties(&prop,MyDevice); aD->gi->major = (unsigned int)prop.major; aD->gi->minor = (unsigned int)prop.minor; aD->gi->multiProcessorCount = (unsigned int)prop.multiProcessorCount; aD->gi->regsPerBlock = (unsigned int)prop.regsPerBlock; aD->gi->warpSize = (unsigned int)prop.warpSize; aD->gi->sharedMemPerBlock = (unsigned int)(prop.sharedMemPerBlock); // aD->gi->maxResidentThreads = prop.maxThreadsPerMultiProcessor;// aD->gi->maxThreadsPerBlock = (unsigned int)prop.maxThreadsPerBlock; aD->gi->maxResidentBlocks = 8; if(aD->gi->major == 2){ aD->gi->maxResidentThreads = 1536;//new aD->gi->sharedMemBanks = 32; aD->gi->maxResidentWarps = 48; }else if(aD->gi->minor > 1){ aD->gi->maxResidentThreads = 1024;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 32; }else{ aD->gi->maxResidentThreads = 768;//new aD->gi->sharedMemBanks = 16; aD->gi->maxResidentWarps = 24; } printTagStart(aD,"GPUDevice"); printTag(aD,"Name",prop.name); printTag(aD,"MultiProcessorCount",(unsigned int)prop.multiProcessorCount); printTag(aD,"ClockRate",prop.clockRate/1000,"in MHz"); printTag(aD,"TotalGlobalMemory",(unsigned int)(prop.totalGlobalMem),"bytes"); printTag(aD,"TotalGlobalMemory",(float)(float(prop.totalGlobalMem)/1073741824.0),"GB"); printTag(aD,"RevisionMajor",(unsigned int)prop.major); printTag(aD,"RevisionMinor",(unsigned int)prop.minor); sprintf(aD->message, "%i x %i x %i", (unsigned int)prop.maxGridSize[0], (unsigned int)prop.maxGridSize[1], (unsigned int)prop.maxGridSize[2]); printTag(aD,"MaximumGridSize",aD->message,"maximum size of a grid of thread blocks"); sprintf(aD->message, "%i x %i x %i", (unsigned int)prop.maxThreadsDim[0], (unsigned int)prop.maxThreadsDim[1], (unsigned int)prop.maxThreadsDim[2]); printTag(aD,"MaximumThreadSize",aD->message,"the maximum size of each dimension of a block"); printTag(aD,"WarpSize",(unsigned int)(prop.warpSize)); printTag(aD,"MaxWarpsPerMultiProcessor",(unsigned int)(aD->gi->maxResidentWarps),"maximum number of resident warps per multiprocessor"); printTag(aD,"MaxThreadsPerBlock",(unsigned int)(prop.maxThreadsPerBlock)); // printTag(aD,"MaxThreadsPerMultiProcessor",prop.maxThreadsPerMultiProcessor); printTag(aD,"MaxBlocksPerMultiProcessor",(unsigned int)(aD->gi->maxResidentBlocks),"maximum number of resident blocks per multiprocessor"); printTag(aD,"RegistersPerBlock",(unsigned int)(prop.regsPerBlock),"maximum number of 32-bit registers available to a thread block"); printTag(aD,"SharedMemoryPerBlock",(unsigned 
int)(prop.sharedMemPerBlock),"maximum amount of shared memory available to a thread block in bytes"); printTag(aD,"SharedMemoryBanks",(unsigned int)(aD->gi->sharedMemBanks),"number of shared memory banks"); // printTag(aD,"L2cache",prop.l2CacheSize,"bytes"); printTagEnd(aD);//GPUDevice printTagEnd(aD);//GPUDevices return true; } bool fbp_axial_cuda(allData *aD){ gpu_info *gi; xData *xd; unsigned int mb, mt, nx, ny; unsigned int uu, ub, maxt; float fnr, da; size_t mem_size, mem_size_c; float *dev_in, *dev_out; cufftComplex *dev_filter, *dev_data; cufftHandle plan; timestamp("Starting fbp_axial_cuda",4); xd = aD->data; gi = aD->gi; nx = aD->ta_nx; ny = aD->ny; mb = gi->maxResidentBlocks; mt = gi->maxResidentThreads; maxt = mt/mb; uu = 1; while(maxt>1){ uu*=2; maxt/=2; } maxt = uu; ub = nx/maxt; da = aD->gi->rotAngleStep; fnr = 0.5f*float(nx-1); dim3 grids_p(ub,1,1); dim3 threads_p(maxt,1,1); mem_size = nx*sizeof(float); mem_size_c = nx*sizeof(cufftComplex); timestamp("calling cuda to automatically get a device",4); cudaFree(NULL); // cudaSetDevice(aD->hms->fbp->GPUDeviceNumber); { int MyDevice; char devmsg[128]; cudaGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } timestamp("calling cudaMalloc",4); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_filter,mem_size_c)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_data,mem_size_c)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_in,mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_out,mem_size)); timestamp("finished cudaMalloc",4); CUDA_SAFE_CALL(cudaMemcpy(dev_filter,xd->veccF,mem_size_c,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_data,xd->veccI,mem_size_c,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaThreadSynchronize()); HMFFT_SAFE_CALL(cufftPlan1d(&plan, nx, CUFFT_C2C, 1)); HMFFT_SAFE_CALL(cufftExecC2C(plan, dev_data, dev_data, CUFFT_FORWARD)); CUDA_SAFE_CALL(cudaThreadSynchronize()); cprod<<<grids_p,threads_p>>>(dev_data,dev_filter); CUDA_SAFE_CALL(cudaThreadSynchronize()); HMFFT_SAFE_CALL(cufftExecC2C(plan, dev_data, dev_data, CUFFT_INVERSE)); CUDA_SAFE_CALL(cudaThreadSynchronize()); creal<<<grids_p,threads_p>>>(dev_data,dev_in); CUDA_SAFE_CALL(cudaThreadSynchronize()); cufftDestroy(plan); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); texA.normalized = false; texA.filterMode = cudaFilterModeLinear; texA.addressMode[0] = cudaAddressModeClamp; CUDA_SAFE_CALL(cudaBindTexture(NULL, texA, dev_in, channelDesc, mem_size)); fbp_axial<<<grids_p, threads_p>>>(dev_out, da, nx, ny, fnr); CUDA_SAFE_CALL(cudaThreadSynchronize()); CUDA_SAFE_CALL(cudaMemcpy(xd->vecto, dev_out, mem_size, cudaMemcpyDeviceToHost)); cudaFree(dev_in); dev_in = NULL; cudaFree(dev_out); dev_out = NULL; cudaFree(dev_data); dev_data = NULL; cudaFree(dev_filter); dev_filter = NULL; timestamp("Finishing fbp_axial_cuda",4); return true; } __global__ void fbp_std2(float* temp_dev, unsigned int nxo, unsigned int mw, float xc, float *dev_CS, unsigned int vH, unsigned int chnum){ extern __shared__ float shm[]; float2 *vcs = (float2 *)&shm[0]; float sum, p, fitt; float x, y; unsigned int ixx, iyy, itt, j, tib, tpb; tib = threadIdx.y * blockDim.x + threadIdx.x; tpb = blockDim.x * blockDim.y; ixx = blockIdx.x*blockDim.x + threadIdx.x; iyy = blockIdx.y*blockDim.y + threadIdx.y; itt = ixx+iyy*nxo; fitt = float(itt); x = tex1Dfetch(texX,fitt); y = tex1Dfetch(texY,fitt); sum = 0.0f; shm[tib] = dev_CS[tib+chnum*tpb]; __syncthreads(); for(j=0; j<vH; j++){ p = xc + x*vcs[j].x+ y*vcs[j].y + float(j*mw); sum += (tex1Dfetch(texA,p)); } 
temp_dev[itt] += sum; } bool fbp_cuda(allData *aD){ xData *xd = aD->data; size_t mem_CS; size_t mem_out, mem_in; size_t mem_chunk; size_t mem_comp; size_t mem_shared; float *dev_in, *dev_out; float *dev_X, *dev_Y; float *dev_CS; unsigned int hTimer; unsigned int nx, ny, nxo, nyo, wo, ho; unsigned int cl, cw; unsigned int vH, sp; cufftComplex *dev_fc, *dev_inc; cufftHandle plan; double gpuTime; xFBP_gpu *fg; timestamp("starting fbp_cuda",4); fg = aD->fg; nx = fg->nx; ny = fg->ny; nxo = fg->nxo; nyo = fg->nyo; wo = fg->blockWidth; ho = fg->blockHeight; cl = fg->chunkLeft; cw = fg->chunkWidth; vH = fg->vH; mem_shared = wo*ho*sizeof(float); mem_comp = nx*vH*sizeof(cufftComplex); mem_chunk = cw*vH*sizeof(float); mem_CS = 2*ny*sizeof(float); mem_out = nxo*nyo*sizeof(float); mem_in = nx*ny*sizeof(float); dim3 grids_in(nx*vH/(wo*ho),1); dim3 threads_in(wo*ho,1); dim3 grids_ch(cw/wo,vH/ho,1); dim3 threads_ch(wo,ho,1); dim3 grids_bp(nxo/wo,nyo/ho,1); dim3 threads_bp(wo,ho,1); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); CUT_SAFE_CALL( cutResetTimer(hTimer)); CUT_SAFE_CALL( cutStartTimer(hTimer)); timestamp("calling cudaMalloc",4); { int MyDevice; char devmsg[128]; cudaGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } CUDA_SAFE_CALL(cudaMalloc((void**)&dev_fc,mem_comp)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_inc,mem_comp)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_in,mem_in)); timestamp("finished cudaMalloc",4); CUDA_SAFE_CALL(cudaMemcpy(dev_fc,xd->veccF,mem_comp,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_in,xd->vta,mem_in,cudaMemcpyHostToDevice)); CUFFT_SAFE_CALL(cufftPlan1d(&plan, nx, CUFFT_C2C, vH)); texA.normalized = false; texA.filterMode = cudaFilterModeLinear; texA.addressMode[0] = cudaAddressModeClamp; texX.normalized = false; texX.filterMode = cudaFilterModeLinear; texX.addressMode[0] = cudaAddressModeClamp; texY.normalized = false; texY.filterMode = cudaFilterModeLinear; texY.addressMode[0] = cudaAddressModeClamp; timestamp("calling cudaMalloc",4); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_CS,mem_CS)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_out,mem_out)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_X,mem_out)); CUDA_SAFE_CALL(cudaMalloc((void**)&dev_Y,mem_out)); timestamp("finished cudaMalloc",4); CUDA_SAFE_CALL(cudaMemcpy(dev_X,xd->vecX,mem_out,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_Y,xd->vecY,mem_out,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(dev_CS,xd->vecCS,mem_CS,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaBindTexture(NULL, texA, dev_in, channelDesc, mem_chunk)); CUDA_SAFE_CALL(cudaBindTexture(NULL, texX, dev_X, channelDesc, mem_out)); CUDA_SAFE_CALL(cudaBindTexture(NULL, texY, dev_Y, channelDesc, mem_out)); set_zero<<<grids_bp, threads_bp>>>(dev_out, nxo); CUDA_SAFE_CALL( cudaThreadSynchronize() ); for(unsigned int i=0;i<fg->numChunks; i++){ sp = i * nx * vH; cuda_data_r2c<<<grids_in,threads_in>>>(dev_inc,dev_in,sp); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUFFT_SAFE_CALL(cufftExecC2C(plan, dev_inc, dev_inc, CUFFT_FORWARD)); CUDA_SAFE_CALL( cudaThreadSynchronize() ); cuda_mul_c<<<grids_in,threads_in>>>(dev_fc,dev_inc); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUFFT_SAFE_CALL(cufftExecC2C(plan, dev_inc, dev_inc, CUFFT_INVERSE)); CUDA_SAFE_CALL( cudaThreadSynchronize() ); cuda_data_c2r_3<<<grids_ch,threads_ch>>>(dev_in, dev_inc, cl, nx, cw, 0); CUDA_SAFE_CALL( cudaThreadSynchronize() ); fbp_std2<<<grids_bp, threads_bp, mem_shared>>>(dev_out, nxo, 
cw, fg->xc, dev_CS, vH, i); CUDA_SAFE_CALL( cudaThreadSynchronize() ); } cufftDestroy(plan); cudaFree(dev_inc); dev_inc = NULL; cudaFree(dev_fc); dev_fc = NULL; CUDA_SAFE_CALL(cudaMemcpy(xd->vto,dev_out,mem_out,cudaMemcpyDeviceToHost)); cudaFree(dev_in); dev_in = NULL; cudaFree(dev_out); dev_out = NULL; cudaFree(dev_CS); dev_CS = NULL; cudaFree(dev_X); dev_X = NULL; cudaFree(dev_Y); dev_Y = NULL; CUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printTag(aD,"BackprojectionTime",(float)gpuTime,"in ms"); timestamp("finished fbp_cuda",4); return true; } __global__ void xb_proj_new(unsigned int indexs, unsigned int starty, float *vif, float *vecxy, float *vec_sc, float xc, unsigned int vH){ unsigned int ij, ijk, tib, i; float s; float2 cs, xy; tib = (blockDim.x*threadIdx.y + threadIdx.x)/2; cs = *(float2 *)(&vec_sc[2*(tib+starty)]); ij = 2*blockIdx.x + indexs; for(i = 0; i<2; i++){ xy = *(float2 *)(&vecxy[2*ij]); ijk = ij*vH+tib; s = __fmaf_rn(xy.x,cs.x,xc); vif[ijk] = __fmaf_rn(xy.y,cs.y,s); ij++; } } __global__ void xb_proj2_new(unsigned int starty, float *vec_x, float *vecxy, float *vec_cs){ unsigned int ij, ijk, tib, i, tpb; float s; float2 cs, xy; tpb = blockDim.x * blockDim.y; tib = (blockDim.x*threadIdx.y + threadIdx.x)/2; cs = *(float2 *)(&vec_cs[2*(tib+starty)]); ij = 2*blockIdx.x; for(i = 0; i<2; i++){ xy = *(float2 *)(&vecxy[2*ij]); ijk = tib*tpb+ij; s = __fmul_rn(xy.x,cs.x); vec_x[ijk] = __fmaf_rn(xy.y,cs.y,s); ij++; } } __global__ void fbp_std5(float *vb, float *veco, float *vt, unsigned int nx, unsigned int vH, unsigned int nxo, unsigned int nxb){ float rr, sum; unsigned int ixq, ivc, ic; unsigned int tib, tig, big, tpb; tib = blockDim.x*threadIdx.y + threadIdx.x; big = blockIdx.y*nxb + blockIdx.x; tig = (blockIdx.y*blockDim.y + threadIdx.y)*nxo + blockIdx.x*blockDim.x + threadIdx.x; tpb = blockDim.x * blockDim.y; ixq = tib; ivc = big*vH; sum = 0.0f; for(ic = 0; ic<vH; ic++){ rr = __fadd_rn(vt[ixq],vb[ivc+ic]); sum += tex1Dfetch(texA,rr+float(ic*nx)); ixq += tpb; } veco[tig] += sum; } bool fbp_cuda_20(allData *aD){ xData *xd = aD->data; unsigned int nx, ny, nxo, nyo; unsigned int wo, ho; unsigned int nxb, nyb; unsigned int mt, mb; unsigned int uu, ub; unsigned int maxt; unsigned int vH; unsigned int nf, sp; unsigned int starty, indexs; unsigned int nxyb, nlen, nxyb2; unsigned int hTimer; size_t mem_fc, mem_chunk; size_t mem_in, mem_out; size_t mem_CS, mem_block_xy; size_t mem_thread_x, mem_thread_xy, mem_block; float xc; float *dev_thread_x, *dev_thread_xy, *dev_block; float *dev_in, *dev_out; float *dev_block_xy; float *dev_CS; gpu_info *gi; double gpuTime; cufftComplex *dev_fc, *dev_inc; cufftHandle plan; timestamp("starting fbp_cuda_20",4); gi = aD->gi; xc = aD->new_xc; vH = gi->vertH; wo = gi->wo; ho = gi->ho; nx = aD->ta_nx; ny = aD->ta_ny; nxo = gi->mxo; nyo = gi->myo; nxb = nxo/wo; nyb = nyo/ho; mem_block = nxb*nyb*vH*sizeof(float); mem_thread_x = wo*ho*vH*sizeof(float); mem_thread_xy = 2*wo*ho*sizeof(float); mem_block_xy = 2 * nxb * nyb * sizeof(float); mem_in = nx * ny *sizeof(float); mem_out = nxo * nyo *sizeof(float); mem_CS = 2*ny*sizeof(float); mem_fc = nx * vH * sizeof(cufftComplex); mem_chunk = nx * vH * sizeof(float); mb = gi->maxResidentBlocks; mt = gi->maxResidentThreads; maxt = mt/mb; uu = 1; while(maxt>1){ uu*=2; maxt/=2; } maxt = uu; ub = nx/maxt; nf = ny/vH; nxyb = nxb*nyb; nlen = (nxyb/4); if(nxyb%4 > 0) nlen++; nxyb2 = nxyb-2*nlen; dim3 threads_xb(1,1,1); dim3 threads_1(maxt,1,1); dim3 threads_bp(wo,ho,1); dim3 grid_xb2(vH,1,1); 
dim3 grid_xb_new(nlen,1,1); dim3 grids_2(ub*vH,1,1); dim3 grid_bp(nxb,nyb,1); HMCUT_SAFE_CALL( cutCreateTimer(&hTimer) ); HMCUT_SAFE_CALL( cutResetTimer(hTimer)); HMCUT_SAFE_CALL( cutStartTimer(hTimer)); timestamp("calling cudaMalloc ",4); { int MyDevice; char devmsg[128]; cudaGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } HM_SAFE_CALL(cudaMalloc((void**)&dev_block,mem_block)); HM_SAFE_CALL(cudaMalloc((void**)&dev_thread_x,mem_thread_x)); HM_SAFE_CALL(cudaMalloc((void**)&dev_thread_xy,mem_thread_xy)); HM_SAFE_CALL(cudaMalloc((void**)&dev_fc,mem_fc)); HM_SAFE_CALL(cudaMalloc((void**)&dev_inc,mem_fc)); HM_SAFE_CALL(cudaMalloc((void**)&dev_in,mem_in)); HM_SAFE_CALL(cudaMalloc((void**)&dev_out,mem_out)); HM_SAFE_CALL(cudaMalloc((void**)&dev_block_xy,mem_block_xy)); HM_SAFE_CALL(cudaMalloc((void**)&dev_CS,mem_CS)); timestamp("finished cudaMalloc ",4); HM_SAFE_CALL(cudaMemcpy(dev_fc,xd->veccF,mem_fc,cudaMemcpyHostToDevice)); HM_SAFE_CALL(cudaMemcpy(dev_in,xd->vta,mem_in,cudaMemcpyHostToDevice)); HM_SAFE_CALL(cudaMemcpy(dev_block_xy,xd->vecbXY,mem_block_xy,cudaMemcpyHostToDevice)); HM_SAFE_CALL(cudaMemcpy(dev_thread_xy,xd->vecXY_block,mem_thread_xy,cudaMemcpyHostToDevice)); HM_SAFE_CALL(cudaMemcpy(dev_CS,xd->vecCS,mem_CS,cudaMemcpyHostToDevice)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); texA.normalized = false; texA.filterMode = cudaFilterModeLinear; texA.addressMode[0] = cudaAddressModeClamp; CUDA_SAFE_CALL(cudaBindTexture(NULL, texA, dev_in, channelDesc, mem_chunk)); /*rca: AARGH -- plan is done for each slice -- this defeats the purpose of planning the FFT */ timestamp("before plan creation",4); HMFFT_SAFE_CALL(cufftPlan1d(&plan, nx, CUFFT_C2C, vH)); timestamp("after plan creation",4); set_zero<<<grid_bp, threads_bp>>>(dev_out, nxo); HM_SAFE_CALL( cudaThreadSynchronize() ); timestamp("before backprojection loop ",4); for(unsigned int i = 0;i<nf;i++){ sp = i*nx*vH; cuda_data_r2c<<<grids_2,threads_1>>>(dev_inc,dev_in,sp); HM_SAFE_CALL( cudaThreadSynchronize() ); HMFFT_SAFE_CALL(cufftExecC2C(plan, dev_inc, dev_inc, CUFFT_FORWARD)); HM_SAFE_CALL( cudaThreadSynchronize() ); cuda_mul_c<<<grids_2,threads_1>>>(dev_fc,dev_inc); HM_SAFE_CALL( cudaThreadSynchronize() ); HMFFT_SAFE_CALL(cufftExecC2C(plan, dev_inc, dev_inc, CUFFT_INVERSE)); HM_SAFE_CALL( cudaThreadSynchronize() ); sp = 0; cuda_data_c2r<<<grids_2,threads_1>>>(dev_in,dev_inc,sp); HM_SAFE_CALL( cudaThreadSynchronize() ); starty = i*vH; indexs = 0; xb_proj_new<<<grid_xb_new, threads_bp>>>(indexs, starty, dev_block, dev_block_xy, dev_CS, xc, vH); HM_SAFE_CALL( cudaThreadSynchronize() ); indexs = nxyb2; xb_proj_new<<<grid_xb_new, threads_bp>>>(indexs, starty, dev_block, dev_block_xy, dev_CS, xc, vH); HM_SAFE_CALL( cudaThreadSynchronize() ); xb_proj2_new<<<grid_xb2, threads_bp>>>(starty, dev_thread_x, dev_thread_xy, dev_CS); HM_SAFE_CALL( cudaThreadSynchronize() ); fbp_std5<<<grid_bp, threads_bp>>>(dev_block, dev_out, dev_thread_x, nx, vH, nxo, nxb); HM_SAFE_CALL( cudaThreadSynchronize() ); } timestamp("after backprojection loop ",4); cufftDestroy(plan); timestamp("after plan destruction",4); HM_SAFE_CALL(cudaMemcpy(xd->vto, dev_out, mem_out, cudaMemcpyDeviceToHost)); cudaFree(dev_fc); dev_fc = NULL; cudaFree(dev_inc); dev_inc = NULL; cudaFree(dev_in); dev_in = NULL; cudaFree(dev_block); dev_block = NULL; cudaFree(dev_thread_x); dev_thread_x = NULL; cudaFree(dev_thread_xy); dev_thread_xy = NULL; cudaFree(dev_out); dev_out = NULL; cudaFree(dev_CS); dev_CS = NULL; 
cudaFree(dev_block_xy); dev_block_xy = NULL; HMCUT_SAFE_CALL( cutStopTimer(hTimer) ); gpuTime = cutGetTimerValue(hTimer); printTag(aD,"TimeBackprojection",float(gpuTime),"time (in ms)"); timestamp("finished fbp_cuda_20",4); return true; } __global__ void fbp_cpu2(float *dev_in, float *dev_pol, float *dev_Cos, unsigned int starty, unsigned int nx, unsigned int na, unsigned int vH, unsigned int pola, float ps, float xc_new){ float x, xx, sum; unsigned int ic, irr; unsigned int tib, tig, tpb; int iaa, aa, ia, dr, dm; tib = blockDim.x*threadIdx.y + threadIdx.x; iaa = blockIdx.x*blockDim.x + threadIdx.x; irr = blockIdx.y*blockDim.y + threadIdx.y; tig = irr*pola + iaa; tpb = blockDim.x * blockDim.y; x = ps * float(irr); sum = 0.0f; float t; ia = starty + iaa; dm = ia/na; dr = ia - dm * na; if(dm%2 == 0){ t= 1.0f; }else{ t= -1.0f; } for(ic = 0; ic<vH; ic++){ if(dr == na){ dr = 0; t*= -1.0f; } xx = xc_new + t* x * tex1Dfetch(texCos, dr); sum += tex1Dfetch(texA,xx + float(ic*nx)); dr++; } dev_pol[tig] += sum; } bool fbp_cuda_cpu2(allData *aD){ xData *xd = aD->data; unsigned int nx, ny, polr, pola, nba, nbr; unsigned int wo, ho; unsigned int mt, mb; unsigned int uu, ub; unsigned int maxt; unsigned int vH; unsigned int nf, sp, na; unsigned int starty, indexs; unsigned int hTimer; size_t mem_fc, mem_chunk; size_t mem_in; size_t mem_Cos, mem_pol; float xc, ps; float *dev_in, *dev_pol; float *dev_Cos; gpu_info *gi; double gpuTime; timestamp("calling fbp_cuda_cpu2",4); gi = aD->gi; pola = gi->pol_a; polr = gi->pol_r; ps = gi->outputPixelSize; cufftComplex *dev_fc, *dev_inc; cufftHandle plan; xc = aD->new_xc; vH = gi->vertH; wo = gi->wo; ho = gi->ho; nx = aD->ta_nx; ny = aD->ta_ny; na = aD->ta_ny_13; nba = pola/wo; nbr = polr/ho; printf("pola: %i, polr: %i\n",pola, polr); mem_in = nx * ny *sizeof(float); mem_pol = pola *polr *sizeof(float); mem_Cos = na*sizeof(float); mem_fc = nx * vH * sizeof(cufftComplex); mem_chunk = nx * vH * sizeof(float); mb = gi->maxResidentBlocks; mt = gi->maxResidentThreads; maxt = mt/mb; uu = 1; while(maxt>1){ uu*=2; maxt/=2; } maxt = uu; ub = nx/maxt; nf = ny/vH; dim3 threads_1(maxt,1,1); dim3 threads_bp(wo,ho,1); dim3 grids_2(ub*vH,1,1); dim3 grid_bp(nba,nbr,1); HMCUT_SAFE_CALL( cutCreateTimer(&hTimer) ); HMCUT_SAFE_CALL( cutResetTimer(hTimer)); HMCUT_SAFE_CALL( cutStartTimer(hTimer)); { int MyDevice; char devmsg[128]; cudaGetDevice(&MyDevice); snprintf(devmsg,128,"my device %i",MyDevice); timestamp(devmsg,4); } HM_SAFE_CALL(cudaMalloc((void**)&dev_fc,mem_fc)); HM_SAFE_CALL(cudaMalloc((void**)&dev_inc,mem_fc)); HM_SAFE_CALL(cudaMalloc((void**)&dev_in,mem_in)); HM_SAFE_CALL(cudaMalloc((void**)&dev_pol,mem_pol)); HM_SAFE_CALL(cudaMalloc((void**)&dev_Cos,mem_Cos)); HM_SAFE_CALL(cudaMemcpy(dev_fc,xd->veccF,mem_fc,cudaMemcpyHostToDevice)); HM_SAFE_CALL(cudaMemcpy(dev_in,xd->vta,mem_in,cudaMemcpyHostToDevice)); HM_SAFE_CALL(cudaMemcpy(dev_Cos,xd->vecCos,mem_Cos,cudaMemcpyHostToDevice)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); texA.normalized = false; texA.filterMode = cudaFilterModeLinear; texA.addressMode[0] = cudaAddressModeClamp; texCos.normalized = false; texCos.filterMode = cudaFilterModeLinear; texCos.addressMode[0] = cudaAddressModeClamp; CUDA_SAFE_CALL(cudaBindTexture(NULL, texA, dev_in, channelDesc, mem_chunk)); CUDA_SAFE_CALL(cudaBindTexture(NULL, texCos, dev_Cos, channelDesc, mem_Cos)); HMFFT_SAFE_CALL(cufftPlan1d(&plan, nx, CUFFT_C2C, vH)); set_zero<<<grid_bp, threads_bp>>>(dev_pol, nba); HM_SAFE_CALL( cudaThreadSynchronize() ); for(unsigned 
int i = 0;i<nf;i++){
	//for(unsigned int i = 0;i<1;i++){
	sp = i*nx*vH;
	cuda_data_r2c<<<grids_2,threads_1>>>(dev_inc,dev_in,sp);
	HM_SAFE_CALL( cudaThreadSynchronize() );
	HMFFT_SAFE_CALL(cufftExecC2C(plan, dev_inc, dev_inc, CUFFT_FORWARD));
	HM_SAFE_CALL( cudaThreadSynchronize() );
	cuda_mul_c<<<grids_2,threads_1>>>(dev_fc,dev_inc);
	HM_SAFE_CALL( cudaThreadSynchronize() );
	HMFFT_SAFE_CALL(cufftExecC2C(plan, dev_inc, dev_inc, CUFFT_INVERSE));
	HM_SAFE_CALL( cudaThreadSynchronize() );
	sp = 0;
	cuda_data_c2r<<<grids_2,threads_1>>>(dev_in,dev_inc,sp);
	HM_SAFE_CALL( cudaThreadSynchronize() );
	starty = i*vH;
	fbp_cpu2<<<grid_bp, threads_bp>>>(dev_in, dev_pol, dev_Cos, starty, nx, na, vH, pola, ps, xc);
	HM_SAFE_CALL( cudaThreadSynchronize() );
}

cufftDestroy(plan);

HM_SAFE_CALL(cudaMemcpy(xd->vecPol, dev_pol, mem_pol, cudaMemcpyDeviceToHost));

cudaFree(dev_fc); dev_fc = NULL;
cudaFree(dev_inc); dev_inc = NULL;
cudaFree(dev_in); dev_in = NULL;
cudaFree(dev_pol); dev_pol = NULL;
cudaFree(dev_Cos); dev_Cos = NULL;

HMCUT_SAFE_CALL( cutStopTimer(hTimer) );
gpuTime = cutGetTimerValue(hTimer);
printTag(aD,"TimeBackprojection",float(gpuTime),"time (in ms)");

return true;
}
af4cacfb92c2db1e6510f2f71b7df30d8c64050c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix addition: C = alpha*A + beta*B, where alpha and beta are two scalars. * Host code. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> // includes, project //#include <cutil.h> // includes, kernels #include "matrixadd_kernel.cu" #include "matrixadd.h" //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold( float*, const float*, const float, const float*, const float, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); bool CompareResults(float* A, float* B, int elements, float eps, float * error); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void MakeN(Matrix* M, Matrix* N); void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P, float * inc, float * exc); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { // Matrices for the program Matrix M; Matrix N; Matrix P; // Number of elements in the solution matrix // Assuming square matrices, so the sizes of M, N and P are equal unsigned int size_elements = WP * HP; int errorM = 0; srand(2012); if(argc != 2) { printf("Error Usage ./problem2 u\n"); } int u=atoi(argv[1]); char filename[100]="problem2.inp"; // Check command line for input matrix files if(u==0) { // No inputs provided // Allocate and initialize the matrices M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); } else { // Inputs provided // Allocate and read source matrices from disk M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); errorM = ReadFile(&M, filename); MakeN(&M, &N); // check for read errors if(errorM != size_elements) { printf("Error reading input files %d\n", errorM); return 1; } } // alpha*M + beta*N on the device float alpha = 1.f; float beta = 1.f; //time the operation float inclusiveTime, exclusiveTime,norm=0; MatrixAddOnDevice(M, alpha, N, beta, P,&inclusiveTime,&exclusiveTime); // compute the matrix addition on the CPU for comparison Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); hipError_t error; hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } computeGold(reference.elements, M.elements, alpha, N.elements, beta, HM, WM); // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { 
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // check if the device result is equivalent to the expected solution bool res = CompareResults(reference.elements, P.elements, size_elements, 0.0001f,&norm); if(res==0)printf("Test failed\n"); // This should not be printed in the correct implementation printf("%f\n%f\n%f\n%f\n",sqrt(norm),msecTotal,inclusiveTime, exclusiveTime); // Free host matrices free(M.elements); M.elements = NULL; free(N.elements); N.elements = NULL; free(P.elements); P.elements = NULL; return 0; } //////////////////////////////////////////////////////////////////////////////// //! wrapper around the device implementation //////////////////////////////////////////////////////////////////////////////// void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P,float * inc,float *exc) // ADD YOUR CODE HERE { hipEvent_t startEvent_inc, stopEvent_inc, startEvent_exc, stopEvent_exc; hipEventCreate(&startEvent_inc); hipEventCreate(&stopEvent_inc); hipEventCreate(&startEvent_exc); hipEventCreate(&stopEvent_exc); float elapsedTime_inc, elapsedTime_exc; hipEventRecord(startEvent_inc,0); // starting timing for inclusive //Allocate device matrices Matrix dM = AllocateDeviceMatrix(M); Matrix dN = AllocateDeviceMatrix(N); Matrix dP = AllocateDeviceMatrix(M); // copy matrices to device CopyToDeviceMatrix(dM, M); CopyToDeviceMatrix(dN, N); hipEventRecord(startEvent_exc,0); // staring timing for exclusive int thread_per_block = 32; int block_per_grid = MATRIX_SIZE * MATRIX_SIZE / thread_per_block; //launch kernel hipLaunchKernelGGL(( MatrixAddKernel) , dim3(block_per_grid) , dim3(thread_per_block) , 0, 0, dM.elements, alpha, dN.elements, beta, dP.elements); hipEventRecord(stopEvent_exc,0); // ending timing for exclusive hipEventSynchronize(stopEvent_exc); hipEventElapsedTime(&elapsedTime_exc, startEvent_exc, stopEvent_exc); // Read P from the device CopyFromDeviceMatrix(P, dP); hipEventRecord(stopEvent_inc,0); //ending timing for inclusive hipEventSynchronize(stopEvent_inc); hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc); *inc = elapsedTime_inc; *exc = elapsedTime_exc; } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { hipError_t error; Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); error = hipMalloc((void**)&Mdevice.elements, size); if (error != hipSuccess) { printf("hipMalloc returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. 
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } //compare the data stored in two arrays on the host bool CompareResults(float* A, float* B, int elements, float eps,float * error) { for(unsigned int i = 0; i < elements; i++){ float temp = sqrt((A[i]-B[i])*(A[i]-B[i])); *error+=temp; if(temp>eps){ return false; } } return true; } // Read a 16x16 floating point matrix in from file int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE; std::ifstream ifile(file_name); for(unsigned int i = 0; i < data_read; i++){ ifile>>M->elements[i]; } ifile.close(); return data_read; } // Read a 16x16 floating point matrix in from file void MakeN(Matrix* M, Matrix* N) { unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE; for(unsigned int i = 0; i < data_read; i++){ N->elements[i]=1.f/(0.2f+M->elements[i]); } } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { std::ofstream ofile(file_name); for(unsigned int i = 0; i < M.width*M.height; i++){ ofile<<M.elements[i]; } ofile.close(); }
af4cacfb92c2db1e6510f2f71b7df30d8c64050c.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix addition: C = alpha*A + beta*B, where alpha and beta are two scalars. * Host code. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> // includes, project //#include <cutil.h> // includes, kernels #include "matrixadd_kernel.cu" #include "matrixadd.h" //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold( float*, const float*, const float, const float*, const float, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); bool CompareResults(float* A, float* B, int elements, float eps, float * error); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void MakeN(Matrix* M, Matrix* N); void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P, float * inc, float * exc); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { // Matrices for the program Matrix M; Matrix N; Matrix P; // Number of elements in the solution matrix // Assuming square matrices, so the sizes of M, N and P are equal unsigned int size_elements = WP * HP; int errorM = 0; srand(2012); if(argc != 2) { printf("Error Usage ./problem2 u\n"); } int u=atoi(argv[1]); char filename[100]="problem2.inp"; // Check command line for input matrix files if(u==0) { // No inputs provided // Allocate and initialize the matrices M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1); P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); } else { // Inputs provided // Allocate and read source matrices from disk M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); errorM = ReadFile(&M, filename); MakeN(&M, &N); // check for read errors if(errorM != size_elements) { printf("Error reading input files %d\n", errorM); return 1; } } // alpha*M + beta*N on the device float alpha = 1.f; float beta = 1.f; //time the operation float inclusiveTime, exclusiveTime,norm=0; MatrixAddOnDevice(M, alpha, N, beta, P,&inclusiveTime,&exclusiveTime); // compute the matrix addition on the CPU for comparison Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0); cudaError_t error; cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } computeGold(reference.elements, M.elements, alpha, N.elements, beta, HM, WM); // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != 
cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // check if the device result is equivalent to the expected solution bool res = CompareResults(reference.elements, P.elements, size_elements, 0.0001f,&norm); if(res==0)printf("Test failed\n"); // This should not be printed in the correct implementation printf("%f\n%f\n%f\n%f\n",sqrt(norm),msecTotal,inclusiveTime, exclusiveTime); // Free host matrices free(M.elements); M.elements = NULL; free(N.elements); N.elements = NULL; free(P.elements); P.elements = NULL; return 0; } //////////////////////////////////////////////////////////////////////////////// //! wrapper around the device implementation //////////////////////////////////////////////////////////////////////////////// void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P,float * inc,float *exc) // ADD YOUR CODE HERE { cudaEvent_t startEvent_inc, stopEvent_inc, startEvent_exc, stopEvent_exc; cudaEventCreate(&startEvent_inc); cudaEventCreate(&stopEvent_inc); cudaEventCreate(&startEvent_exc); cudaEventCreate(&stopEvent_exc); float elapsedTime_inc, elapsedTime_exc; cudaEventRecord(startEvent_inc,0); // starting timing for inclusive //Allocate device matrices Matrix dM = AllocateDeviceMatrix(M); Matrix dN = AllocateDeviceMatrix(N); Matrix dP = AllocateDeviceMatrix(M); // copy matrices to device CopyToDeviceMatrix(dM, M); CopyToDeviceMatrix(dN, N); cudaEventRecord(startEvent_exc,0); // staring timing for exclusive int thread_per_block = 32; int block_per_grid = MATRIX_SIZE * MATRIX_SIZE / thread_per_block; //launch kernel MatrixAddKernel <<< block_per_grid , thread_per_block >>> (dM.elements, alpha, dN.elements, beta, dP.elements); cudaEventRecord(stopEvent_exc,0); // ending timing for exclusive cudaEventSynchronize(stopEvent_exc); cudaEventElapsedTime(&elapsedTime_exc, startEvent_exc, stopEvent_exc); // Read P from the device CopyFromDeviceMatrix(P, dP); cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive cudaEventSynchronize(stopEvent_inc); cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc); *inc = elapsedTime_inc; *exc = elapsedTime_exc; } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { cudaError_t error; Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); error = cudaMalloc((void**)&Mdevice.elements, size); if (error != cudaSuccess) { printf("cudaMalloc returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. 
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } //compare the data stored in two arrays on the host bool CompareResults(float* A, float* B, int elements, float eps,float * error) { for(unsigned int i = 0; i < elements; i++){ float temp = sqrt((A[i]-B[i])*(A[i]-B[i])); *error+=temp; if(temp>eps){ return false; } } return true; } // Read a 16x16 floating point matrix in from file int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE; std::ifstream ifile(file_name); for(unsigned int i = 0; i < data_read; i++){ ifile>>M->elements[i]; } ifile.close(); return data_read; } // Read a 16x16 floating point matrix in from file void MakeN(Matrix* M, Matrix* N) { unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE; for(unsigned int i = 0; i < data_read; i++){ N->elements[i]=1.f/(0.2f+M->elements[i]); } } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { std::ofstream ofile(file_name); for(unsigned int i = 0; i < M.width*M.height; i++){ ofile<<M.elements[i]; } ofile.close(); }
9bbe112224a25a96b279e0b8fbcf22d5b915276c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 2014 Nervana Systems Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// nvcc -arch sm_52 -cubin hconv_updat_u8_C64_K64.cu

extern "C"
__global__ void __launch_bounds__(64) hconv_updat_u8_C64_K64
(
    unsigned int* param_Rand,
    unsigned short* param_F,
    const unsigned char* param_I,
    const unsigned short* param_E,
    float param_alpha,
    int param_flags,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_CRST,
    int param_RST,
    int param_magic_RST,
    int param_shift_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_P,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ,
    int param_part_P,
    int param_part_Q,
    int param_part_PQ
)
{
    __shared__ float share[64*8*4 + 6];

    int tid = threadIdx.x;

    share[tid] = 1;
    *param_F = share[63-tid];
}
9bbe112224a25a96b279e0b8fbcf22d5b915276c.cu
/*
 * Copyright 2014 Nervana Systems Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// nvcc -arch sm_52 -cubin hconv_updat_u8_C64_K64.cu

extern "C"
__global__ void __launch_bounds__(64) hconv_updat_u8_C64_K64
(
    unsigned int* param_Rand,
    unsigned short* param_F,
    const unsigned char* param_I,
    const unsigned short* param_E,
    float param_alpha,
    int param_flags,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_CRST,
    int param_RST,
    int param_magic_RST,
    int param_shift_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_P,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ,
    int param_part_P,
    int param_part_Q,
    int param_part_PQ
)
{
    __shared__ float share[64*8*4 + 6];

    int tid = threadIdx.x;

    share[tid] = 1;
    *param_F = share[63-tid];
}
137921a50966f0d85e952695745809119cb62dfc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2023, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/solver/idr_kernels.hpp" #include <ctime> #include <random> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include "core/components/fill_array_kernels.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/curand_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The IDR solver namespace. 
* * @ingroup idr */ namespace idr { constexpr int default_block_size = 512; constexpr int default_dot_dim = 32; constexpr int default_dot_size = default_dot_dim * default_dot_dim; #include "common/cuda_hip/solver/idr_kernels.hpp.inc" namespace { template <typename ValueType> void initialize_m(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, matrix::Dense<ValueType>* m, array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto m_stride = m->get_stride(); const auto grid_dim = ceildiv(m_stride * subspace_dim, default_block_size); hipLaunchKernelGGL(( initialize_m_kernel), dim3(grid_dim), dim3(default_block_size), 0, exec->get_stream(), subspace_dim, nrhs, as_device_type(m->get_values()), m_stride, as_device_type(stop_status->get_data())); } template <typename ValueType> void initialize_subspace_vectors(std::shared_ptr<const DefaultExecutor> exec, matrix::Dense<ValueType>* subspace_vectors, bool deterministic) { if (!deterministic) { auto gen = hiprand::rand_generator(std::random_device{}(), HIPRAND_RNG_PSEUDO_DEFAULT, exec->get_stream()); hiprand::rand_vector( gen, subspace_vectors->get_size()[0] * subspace_vectors->get_stride(), 0.0, 1.0, subspace_vectors->get_values()); } } template <typename ValueType> void orthonormalize_subspace_vectors( std::shared_ptr<const DefaultExecutor> exec, matrix::Dense<ValueType>* subspace_vectors) { hipLaunchKernelGGL(( orthonormalize_subspace_vectors_kernel<default_block_size>) , dim3(1), dim3(default_block_size), 0, exec->get_stream(), subspace_vectors->get_size()[0], subspace_vectors->get_size()[1], as_device_type(subspace_vectors->get_values()), subspace_vectors->get_stride()); } template <typename ValueType> void solve_lower_triangular(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* c, const array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs, default_block_size); hipLaunchKernelGGL(( solve_lower_triangular_kernel), dim3(grid_dim), dim3(default_block_size), 0, exec->get_stream(), subspace_dim, nrhs, as_device_type(m->get_const_values()), m->get_stride(), as_device_type(f->get_const_values()), f->get_stride(), as_device_type(c->get_values()), c->get_stride(), stop_status->get_const_data()); } template <typename ValueType> void update_g_and_u(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g->get_size()[0]; const auto p_stride = p->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = 0; i < k; i++) { const auto p_i = p->get_const_values() + i * p_stride; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, alpha->get_values(), nrhs, zero<ValueType>()); hipLaunchKernelGGL(( multidot_kernel), dim3(grid_dim), dim3(block_dim), 0, exec->get_stream(), size, nrhs, as_device_type(p_i), as_device_type(g_k->get_values()), g_k->get_stride(), as_device_type(alpha->get_values()), stop_status->get_const_data()); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, 
g_k->get_values(), g_k->get_stride(), alpha->get_values()); } hipLaunchKernelGGL(( update_g_k_and_u_kernel<default_block_size>) , dim3(ceildiv(size * g_k->get_stride(), default_block_size)), dim3(default_block_size), 0, exec->get_stream(), k, i, size, nrhs, as_device_type(alpha->get_const_values()), as_device_type(m->get_const_values()), m->get_stride(), as_device_type(g->get_const_values()), g->get_stride(), as_device_type(g_k->get_values()), g_k->get_stride(), as_device_type(u->get_values()), u->get_stride(), stop_status->get_const_data()); } hipLaunchKernelGGL(( update_g_kernel<default_block_size>) , dim3(ceildiv(size * g_k->get_stride(), default_block_size)), dim3(default_block_size), 0, exec->get_stream(), k, size, nrhs, as_device_type(g_k->get_const_values()), g_k->get_stride(), as_device_type(g->get_values()), g->get_stride(), stop_status->get_const_data()); } template <typename ValueType> void update_m(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* m, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g_k->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto p_stride = p->get_stride(); const auto m_stride = m->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = k; i < subspace_dim; i++) { const auto p_i = p->get_const_values() + i * p_stride; auto m_i = m->get_values() + i * m_stride + k * nrhs; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, m_i, nrhs, zero<ValueType>()); hipLaunchKernelGGL(( multidot_kernel), dim3(grid_dim), dim3(block_dim), 0, exec->get_stream(), size, nrhs, as_device_type(p_i), as_device_type(g_k->get_const_values()), g_k->get_stride(), as_device_type(m_i), stop_status->get_const_data()); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_const_values(), g_k->get_stride(), m_i); } } } template <typename ValueType> void update_x_r_and_f(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* g, const matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* r, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { const auto size = x->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(size * x->get_stride(), default_block_size); hipLaunchKernelGGL(( update_x_r_and_f_kernel), dim3(grid_dim), dim3(default_block_size), 0, exec->get_stream(), k, size, subspace_dim, nrhs, as_device_type(m->get_const_values()), m->get_stride(), as_device_type(g->get_const_values()), g->get_stride(), as_device_type(u->get_const_values()), u->get_stride(), as_device_type(f->get_values()), f->get_stride(), as_device_type(r->get_values()), r->get_stride(), as_device_type(x->get_values()), x->get_stride(), stop_status->get_const_data()); components::fill_array(exec, f->get_values() + k * f->get_stride(), nrhs, zero<ValueType>()); } } // namespace template <typename ValueType> void initialize(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* subspace_vectors, bool deterministic, array<stopping_status>* stop_status) { initialize_m(exec, nrhs, m, stop_status); 
initialize_subspace_vectors(exec, subspace_vectors, deterministic); orthonormalize_subspace_vectors(exec, subspace_vectors); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_INITIALIZE_KERNEL); template <typename ValueType> void step_1(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, const matrix::Dense<ValueType>* residual, const matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* v, const array<stopping_status>* stop_status) { solve_lower_triangular(exec, nrhs, m, f, c, stop_status); const auto num_rows = v->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); hipLaunchKernelGGL(( step_1_kernel), dim3(grid_dim), dim3(default_block_size), 0, exec->get_stream(), k, num_rows, subspace_dim, nrhs, as_device_type(residual->get_const_values()), residual->get_stride(), as_device_type(c->get_const_values()), c->get_stride(), as_device_type(g->get_const_values()), g->get_stride(), as_device_type(v->get_values()), v->get_stride(), stop_status->get_const_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_1_KERNEL); template <typename ValueType> void step_2(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* omega, const matrix::Dense<ValueType>* preconditioned_vector, const matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto num_rows = preconditioned_vector->get_size()[0]; const auto subspace_dim = u->get_size()[1] / nrhs; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); hipLaunchKernelGGL(( step_2_kernel), dim3(grid_dim), dim3(default_block_size), 0, exec->get_stream(), k, num_rows, subspace_dim, nrhs, as_device_type(omega->get_const_values()), as_device_type(preconditioned_vector->get_const_values()), preconditioned_vector->get_stride(), as_device_type(c->get_const_values()), c->get_stride(), as_device_type(u->get_values()), u->get_stride(), stop_status->get_const_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_2_KERNEL); template <typename ValueType> void step_3(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* residual, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { update_g_and_u(exec, nrhs, k, p, m, alpha, g, g_k, u, stop_status); update_m(exec, nrhs, k, p, g_k, m, stop_status); update_x_r_and_f(exec, nrhs, k, m, g, u, f, residual, x, stop_status); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_3_KERNEL); template <typename ValueType> void compute_omega( std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const remove_complex<ValueType> kappa, const matrix::Dense<ValueType>* tht, const matrix::Dense<remove_complex<ValueType>>* residual_norm, matrix::Dense<ValueType>* omega, const array<stopping_status>* stop_status) { const auto grid_dim = ceildiv(nrhs, config::warp_size); hipLaunchKernelGGL(( compute_omega_kernel), dim3(grid_dim), dim3(config::warp_size), 0, exec->get_stream(), nrhs, as_device_type(kappa), as_device_type(tht->get_const_values()), 
        as_device_type(residual_norm->get_const_values()),
        as_device_type(omega->get_values()), stop_status->get_const_data());
}

GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_COMPUTE_OMEGA_KERNEL);


}  // namespace idr
}  // namespace cuda
}  // namespace kernels
}  // namespace gko
137921a50966f0d85e952695745809119cb62dfc.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2023, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/solver/idr_kernels.hpp" #include <ctime> #include <random> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include "core/components/fill_array_kernels.hpp" #include "cuda/base/config.hpp" #include "cuda/base/cublas_bindings.hpp" #include "cuda/base/curand_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The IDR solver namespace. 
* * @ingroup idr */ namespace idr { constexpr int default_block_size = 512; constexpr int default_dot_dim = 32; constexpr int default_dot_size = default_dot_dim * default_dot_dim; #include "common/cuda_hip/solver/idr_kernels.hpp.inc" namespace { template <typename ValueType> void initialize_m(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, matrix::Dense<ValueType>* m, array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto m_stride = m->get_stride(); const auto grid_dim = ceildiv(m_stride * subspace_dim, default_block_size); initialize_m_kernel<<<grid_dim, default_block_size, 0, exec->get_stream()>>>( subspace_dim, nrhs, as_device_type(m->get_values()), m_stride, as_device_type(stop_status->get_data())); } template <typename ValueType> void initialize_subspace_vectors(std::shared_ptr<const DefaultExecutor> exec, matrix::Dense<ValueType>* subspace_vectors, bool deterministic) { if (!deterministic) { auto gen = curand::rand_generator(std::random_device{}(), CURAND_RNG_PSEUDO_DEFAULT, exec->get_stream()); curand::rand_vector( gen, subspace_vectors->get_size()[0] * subspace_vectors->get_stride(), 0.0, 1.0, subspace_vectors->get_values()); } } template <typename ValueType> void orthonormalize_subspace_vectors( std::shared_ptr<const DefaultExecutor> exec, matrix::Dense<ValueType>* subspace_vectors) { orthonormalize_subspace_vectors_kernel<default_block_size> <<<1, default_block_size, 0, exec->get_stream()>>>( subspace_vectors->get_size()[0], subspace_vectors->get_size()[1], as_device_type(subspace_vectors->get_values()), subspace_vectors->get_stride()); } template <typename ValueType> void solve_lower_triangular(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* c, const array<stopping_status>* stop_status) { const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs, default_block_size); solve_lower_triangular_kernel<<<grid_dim, default_block_size, 0, exec->get_stream()>>>( subspace_dim, nrhs, as_device_type(m->get_const_values()), m->get_stride(), as_device_type(f->get_const_values()), f->get_stride(), as_device_type(c->get_values()), c->get_stride(), stop_status->get_const_data()); } template <typename ValueType> void update_g_and_u(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g->get_size()[0]; const auto p_stride = p->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = 0; i < k; i++) { const auto p_i = p->get_const_values() + i * p_stride; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, alpha->get_values(), nrhs, zero<ValueType>()); multidot_kernel<<<grid_dim, block_dim, 0, exec->get_stream()>>>( size, nrhs, as_device_type(p_i), as_device_type(g_k->get_values()), g_k->get_stride(), as_device_type(alpha->get_values()), stop_status->get_const_data()); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_values(), g_k->get_stride(), alpha->get_values()); } update_g_k_and_u_kernel<default_block_size> <<<ceildiv(size * 
g_k->get_stride(), default_block_size), default_block_size, 0, exec->get_stream()>>>( k, i, size, nrhs, as_device_type(alpha->get_const_values()), as_device_type(m->get_const_values()), m->get_stride(), as_device_type(g->get_const_values()), g->get_stride(), as_device_type(g_k->get_values()), g_k->get_stride(), as_device_type(u->get_values()), u->get_stride(), stop_status->get_const_data()); } update_g_kernel<default_block_size> <<<ceildiv(size * g_k->get_stride(), default_block_size), default_block_size, 0, exec->get_stream()>>>( k, size, nrhs, as_device_type(g_k->get_const_values()), g_k->get_stride(), as_device_type(g->get_values()), g->get_stride(), stop_status->get_const_data()); } template <typename ValueType> void update_m(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, const matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* m, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto size = g_k->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto p_stride = p->get_stride(); const auto m_stride = m->get_stride(); const dim3 grid_dim(ceildiv(nrhs, default_dot_dim), exec->get_num_multiprocessor() * 2); const dim3 block_dim(default_dot_dim, default_dot_dim); for (size_type i = k; i < subspace_dim; i++) { const auto p_i = p->get_const_values() + i * p_stride; auto m_i = m->get_values() + i * m_stride + k * nrhs; if (nrhs > 1 || is_complex<ValueType>()) { components::fill_array(exec, m_i, nrhs, zero<ValueType>()); multidot_kernel<<<grid_dim, block_dim, 0, exec->get_stream()>>>( size, nrhs, as_device_type(p_i), as_device_type(g_k->get_const_values()), g_k->get_stride(), as_device_type(m_i), stop_status->get_const_data()); } else { cublas::dot(exec->get_cublas_handle(), size, p_i, 1, g_k->get_const_values(), g_k->get_stride(), m_i); } } } template <typename ValueType> void update_x_r_and_f(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* g, const matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* r, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { const auto size = x->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(size * x->get_stride(), default_block_size); update_x_r_and_f_kernel<<<grid_dim, default_block_size, 0, exec->get_stream()>>>( k, size, subspace_dim, nrhs, as_device_type(m->get_const_values()), m->get_stride(), as_device_type(g->get_const_values()), g->get_stride(), as_device_type(u->get_const_values()), u->get_stride(), as_device_type(f->get_values()), f->get_stride(), as_device_type(r->get_values()), r->get_stride(), as_device_type(x->get_values()), x->get_stride(), stop_status->get_const_data()); components::fill_array(exec, f->get_values() + k * f->get_stride(), nrhs, zero<ValueType>()); } } // namespace template <typename ValueType> void initialize(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* subspace_vectors, bool deterministic, array<stopping_status>* stop_status) { initialize_m(exec, nrhs, m, stop_status); initialize_subspace_vectors(exec, subspace_vectors, deterministic); orthonormalize_subspace_vectors(exec, subspace_vectors); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_INITIALIZE_KERNEL); template <typename ValueType> void step_1(std::shared_ptr<const 
DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* m, const matrix::Dense<ValueType>* f, const matrix::Dense<ValueType>* residual, const matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* v, const array<stopping_status>* stop_status) { solve_lower_triangular(exec, nrhs, m, f, c, stop_status); const auto num_rows = v->get_size()[0]; const auto subspace_dim = m->get_size()[0]; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); step_1_kernel<<<grid_dim, default_block_size, 0, exec->get_stream()>>>( k, num_rows, subspace_dim, nrhs, as_device_type(residual->get_const_values()), residual->get_stride(), as_device_type(c->get_const_values()), c->get_stride(), as_device_type(g->get_const_values()), g->get_stride(), as_device_type(v->get_values()), v->get_stride(), stop_status->get_const_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_1_KERNEL); template <typename ValueType> void step_2(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* omega, const matrix::Dense<ValueType>* preconditioned_vector, const matrix::Dense<ValueType>* c, matrix::Dense<ValueType>* u, const array<stopping_status>* stop_status) { if (nrhs == 0) { return; } const auto num_rows = preconditioned_vector->get_size()[0]; const auto subspace_dim = u->get_size()[1] / nrhs; const auto grid_dim = ceildiv(nrhs * num_rows, default_block_size); step_2_kernel<<<grid_dim, default_block_size, 0, exec->get_stream()>>>( k, num_rows, subspace_dim, nrhs, as_device_type(omega->get_const_values()), as_device_type(preconditioned_vector->get_const_values()), preconditioned_vector->get_stride(), as_device_type(c->get_const_values()), c->get_stride(), as_device_type(u->get_values()), u->get_stride(), stop_status->get_const_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_2_KERNEL); template <typename ValueType> void step_3(std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const size_type k, const matrix::Dense<ValueType>* p, matrix::Dense<ValueType>* g, matrix::Dense<ValueType>* g_k, matrix::Dense<ValueType>* u, matrix::Dense<ValueType>* m, matrix::Dense<ValueType>* f, matrix::Dense<ValueType>* alpha, matrix::Dense<ValueType>* residual, matrix::Dense<ValueType>* x, const array<stopping_status>* stop_status) { update_g_and_u(exec, nrhs, k, p, m, alpha, g, g_k, u, stop_status); update_m(exec, nrhs, k, p, g_k, m, stop_status); update_x_r_and_f(exec, nrhs, k, m, g, u, f, residual, x, stop_status); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_STEP_3_KERNEL); template <typename ValueType> void compute_omega( std::shared_ptr<const DefaultExecutor> exec, const size_type nrhs, const remove_complex<ValueType> kappa, const matrix::Dense<ValueType>* tht, const matrix::Dense<remove_complex<ValueType>>* residual_norm, matrix::Dense<ValueType>* omega, const array<stopping_status>* stop_status) { const auto grid_dim = ceildiv(nrhs, config::warp_size); compute_omega_kernel<<<grid_dim, config::warp_size, 0, exec->get_stream()>>>( nrhs, as_device_type(kappa), as_device_type(tht->get_const_values()), as_device_type(residual_norm->get_const_values()), as_device_type(omega->get_values()), stop_status->get_const_data()); } GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_IDR_COMPUTE_OMEGA_KERNEL); } // namespace idr } // namespace cuda } // namespace kernels } // namespace gko
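The IDR kernels above all size their one-dimensional launches the same way: ceildiv(total work, default_block_size) blocks of default_block_size threads, with a guard inside the kernel for the partially filled last block. The stand-alone sketch below shows that launch-shape convention in isolation; every name prefixed with example_ is illustrative and not part of Ginkgo.

#include <cstddef>
#include <cuda_runtime.h>

constexpr int example_block_size = 512;   // plays the role of default_block_size above

constexpr std::size_t example_ceildiv(std::size_t num, std::size_t den)
{
    return (num + den - 1) / den;  // round up so every element gets a thread
}

__global__ void example_scale_kernel(std::size_t n, double alpha, double* x)
{
    const std::size_t i = blockIdx.x * static_cast<std::size_t>(blockDim.x) + threadIdx.x;
    if (i < n) {  // guard for the ragged last block
        x[i] *= alpha;
    }
}

void example_launch(std::size_t n, double alpha, double* d_x, cudaStream_t stream)
{
    const auto grid = static_cast<unsigned int>(example_ceildiv(n, example_block_size));
    example_scale_kernel<<<grid, example_block_size, 0, stream>>>(n, alpha, d_x);
}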
f8011dd960e6151ff0f31f8ada02199def692d3c.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/arithmetic_ops.h" #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/float16.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/math.h" #include "chainerx/scalar.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct AddImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); } }; class CudaAddOp : public AddOp { public: // TODO(sonots): support stream void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(AddImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(AddOp, CudaAddOp); template <typename T> struct AddASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); } CudaType x2; }; class CudaAddASOp : public AddASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(AddASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(AddASOp, CudaAddASOp); template <typename T> struct SubtractImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); } }; class CudaSubtractOp : public SubtractOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(SubtractImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(SubtractOp, CudaSubtractOp); template <typename T> struct SubtractASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); } CudaType x2; }; class CudaSubtractASOp : public SubtractASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? 
x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(SubtractASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(SubtractASOp, CudaSubtractASOp); template <typename T> struct MultiplyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); } }; // TODO(sonots): support stream class CudaMultiplyOp : public MultiplyOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(MultiplyImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(MultiplyOp, CudaMultiplyOp); template <typename T> struct MultiplyASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); } CudaType x2; }; class CudaMultiplyASOp : public MultiplyASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(MultiplyASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(MultiplyASOp, CudaMultiplyASOp); // CUDA does not have std::div. __device__ int8_t FloorDivide(int8_t x, int8_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ int16_t FloorDivide(int16_t x, int16_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ int32_t FloorDivide(int32_t x, int32_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ int64_t FloorDivide(int64_t x, int64_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ uint8_t FloorDivide(uint8_t x, uint8_t y) { return x / y; } __device__ float FloorDivide(float x, float y) { float rem = ::fmod(x, y); return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0); } __device__ double FloorDivide(double x, double y) { double rem = ::fmod(x, y); return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0); } __device__ cuda::Float16 FloorDivide(cuda::Float16 x, cuda::Float16 y) { return cuda::Float16{FloorDivide(static_cast<float>(x), static_cast<float>(y))}; } template <typename T> struct FloorDivideImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = cuda::FloorDivide(x1, x2); } }; class CudaFloorDivideOp : public FloorDivideOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? 
x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(FloorDivideImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(FloorDivideOp, CudaFloorDivideOp); template <typename T> struct FloorDivideASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::FloorDivide(x1, x2); } CudaType x2; }; class CudaFloorDivideASOp : public FloorDivideASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(FloorDivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(FloorDivideASOp, CudaFloorDivideASOp); template <typename T> struct DivideImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); } }; class CudaDivideOp : public DivideOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(DivideImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(DivideOp, CudaDivideOp); template <typename T> struct DivideASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); } CudaType x2; }; class CudaDivideASOp : public DivideASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(DivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(DivideASOp, CudaDivideASOp); } // namespace } // namespace cuda } // namespace chainerx
f8011dd960e6151ff0f31f8ada02199def692d3c.cu
#include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <cuda_runtime.h> #include "chainerx/arithmetic_ops.h" #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/float16.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/math.h" #include "chainerx/scalar.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct AddImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); } }; class CudaAddOp : public AddOp { public: // TODO(sonots): support stream void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(AddImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(AddOp, CudaAddOp); template <typename T> struct AddASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); } CudaType x2; }; class CudaAddASOp : public AddASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(AddASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(AddASOp, CudaAddASOp); template <typename T> struct SubtractImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); } }; class CudaSubtractOp : public SubtractOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(SubtractImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(SubtractOp, CudaSubtractOp); template <typename T> struct SubtractASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); } CudaType x2; }; class CudaSubtractASOp : public SubtractASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? 
x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(SubtractASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(SubtractASOp, CudaSubtractASOp); template <typename T> struct MultiplyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); } }; // TODO(sonots): support stream class CudaMultiplyOp : public MultiplyOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(MultiplyImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(MultiplyOp, CudaMultiplyOp); template <typename T> struct MultiplyASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); } CudaType x2; }; class CudaMultiplyASOp : public MultiplyASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(MultiplyASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(MultiplyASOp, CudaMultiplyASOp); // CUDA does not have std::div. __device__ int8_t FloorDivide(int8_t x, int8_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ int16_t FloorDivide(int16_t x, int16_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ int32_t FloorDivide(int32_t x, int32_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ int64_t FloorDivide(int64_t x, int64_t y) { return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0); } __device__ uint8_t FloorDivide(uint8_t x, uint8_t y) { return x / y; } __device__ float FloorDivide(float x, float y) { float rem = std::fmod(x, y); return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0); } __device__ double FloorDivide(double x, double y) { double rem = std::fmod(x, y); return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0); } __device__ cuda::Float16 FloorDivide(cuda::Float16 x, cuda::Float16 y) { return cuda::Float16{FloorDivide(static_cast<float>(x), static_cast<float>(y))}; } template <typename T> struct FloorDivideImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = cuda::FloorDivide(x1, x2); } }; class CudaFloorDivideOp : public FloorDivideOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? 
x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(FloorDivideImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(FloorDivideOp, CudaFloorDivideOp); template <typename T> struct FloorDivideASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::FloorDivide(x1, x2); } CudaType x2; }; class CudaFloorDivideASOp : public FloorDivideASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitNumericDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(FloorDivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(FloorDivideASOp, CudaFloorDivideASOp); template <typename T> struct DivideImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); } }; class CudaDivideOp : public DivideOp { public: void Call(const Array& x1, const Array& x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, x2, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, const T, T>(DivideImpl<T>{}, x1_cast, x2_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(DivideOp, CudaDivideOp); template <typename T> struct DivideASImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); } CudaType x2; }; class CudaDivideASOp : public DivideASOp { public: void Call(const Array& x1, Scalar x2, const Array& out) override { Device& device = x1.device(); device.CheckDevicesCompatible(x1, out); const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype()); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, T>(DivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(DivideASOp, CudaDivideASOp); } // namespace } // namespace cuda } // namespace chainerx
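Both versions of this file hand small functor structs to a ChainerX Elementwise helper; the array-scalar variants (AddASImpl, SubtractASImpl, and so on) carry the scalar operand as a member x2 that the device operator() reads. The sketch below reimplements that functor-plus-launcher idea in a minimal, contiguous-float-only form so the pattern is visible on its own; ElementwiseSketch and AddScalarSketch are hypothetical stand-ins, not the real ChainerX API.

#include <cstdint>
#include <cuda_runtime.h>

struct AddScalarSketch {
    __device__ void operator()(int64_t /*i*/, float x1, float& out) const { out = x1 + x2; }
    float x2;  // scalar operand captured by value, mirroring the CudaType x2 member above
};

template <typename Op>
__global__ void elementwise_sketch_kernel(Op op, int64_t n, const float* in, float* out)
{
    const int64_t i = blockIdx.x * static_cast<int64_t>(blockDim.x) + threadIdx.x;
    if (i < n) {
        op(i, in[i], out[i]);  // same call shape as the functors above
    }
}

template <typename Op>
void ElementwiseSketch(Op op, int64_t n, const float* d_in, float* d_out)
{
    constexpr int block = 256;
    const unsigned int grid = static_cast<unsigned int>((n + block - 1) / block);
    elementwise_sketch_kernel<<<grid, block>>>(op, n, d_in, d_out);
}

// e.g. ElementwiseSketch(AddScalarSketch{2.0f}, n, d_in, d_out);  // out[i] = in[i] + 2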
09e9d74d127a8d9ac9e8dfc89e34ba3ece6b64f8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/types_c.h> #include <stdio.h> using namespace std; using namespace cv; // cudaReadModeElementType does not support linear interpolation // cudaReadModeNormalizedFloat supports linear interpolation texture<uchar4, hipTextureType2D, hipReadModeNormalizedFloat> tex; __global__ void smooth_kernel(char *img, int width, int heigth, int channels) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //if using normalized coordinates float u = x / (float)width; float v = y / (float)heigth; //if cudaReadModeElementType is used, the uchar4 read cannot be converted to float float4 pixel = tex2D(tex, u, v); float4 left = tex2D(tex, u - 1, v); float4 right = tex2D(tex, u + 1, v); float4 top = tex2D(tex, u, v - 1); float4 botton = tex2D(tex, u, v + 1); char* pix = img + (v*width + u)*channels; pix[0] = (left.x + right.x + top.x + botton.x) / 4 * 255; pix[1] = (left.y + right.y + top.y + botton.y) / 4 * 255; pix[2] = (left.z + right.z + top.z + botton.z) / 4 * 255; pix[3] = 0; } __global__ void zoom_kernel(char *img, int width, int heigth, int channels) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // use normalized coordinates so scaling does not affect the sampling coordinates float u = x / (float)width; float v = y / (float)heigth; //if cudaReadModeElementType is used, the uchar4 read cannot be converted to float float4 pixel = tex2D(tex, u, v); // the coordinates here are float, which is why hardware interpolation is possible char* pix = img + (y*width + x)*channels; pix[0] = pixel.x * 255; pix[1] = pixel.y * 255; pix[2] = pixel.z * 255; pix[3] = 0; } int main(int argc, char **argv) { Mat img = imread("../demo.jpg", IMREAD_COLOR); if (img.empty()) return 1; Mat src = img(cv::Rect(0, 0, 256, 256)); cvtColor(src, src, CV_BGR2RGBA); int rows = src.rows; int cols = src.cols; int channels = src.channels(); int width = cols, height = rows, size = rows * cols*channels; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar4>(); hipArray *cuArray; hipMallocArray(&cuArray, &channelDesc, width, height); hipMemcpyToArray(cuArray, 0, 0, src.data, size, hipMemcpyHostToDevice); tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; tex.filterMode = hipFilterModeLinear; tex.normalized = true; hipBindTextureToArray(tex, cuArray, channelDesc); #pragma region zoom float scale = 4.0; Mat zoom_h; resize(src, zoom_h, cv::Size(src.rows*scale, src.cols*scale), 0, 0, INTER_LINEAR); Mat zoom_d = Mat::zeros(rows*scale, cols*scale, CV_8UC4); char *dev_out = nullptr; hipMalloc((void**)&dev_out, zoom_d.rows*zoom_d.cols*zoom_d.channels() * sizeof(uchar)); dim3 block(32, 8); dim3 grid((zoom_d.cols - 1) / block.x + 1, (zoom_d.rows - 1) / block.y + 1); zoom_kernel << <grid, block >> > (dev_out, zoom_d.cols, zoom_d.rows, zoom_d.channels()); hipError_t err = hipGetLastError(); hipDeviceSynchronize(); hipMemcpy(zoom_d.data, dev_out, zoom_d.rows*zoom_d.cols*zoom_d.channels() * sizeof(uchar), hipMemcpyDeviceToHost); hipUnbindTexture(tex); hipFree(dev_out); hipFreeArray(cuArray); imshow("orignal", src); imshow("zoom_h", zoom_h); imshow("zoom_d", zoom_d); waitKey(0); #pragma endregion zoom // #pragma region smooth // Mat out_smooth = Mat::zeros(rows, cols, CV_8UC4); // char *dev_out = nullptr; // hipMalloc((void**)&dev_out, size); // // dim3 block(16, 16); // dim3 grid((width - 1) / block.x + 1, (height - 1) / block.y + 1); // smooth_kernel << <grid, block>> > (dev_out, width, height, channels); // hipDeviceSynchronize(); // hipMemcpy(out_smooth.data, dev_out, size, hipMemcpyDeviceToHost); // //
hipUnbindTexture(tex); // hipFree(dev_out); // hipFreeArray(cuArray); // // imshow("orignal", src); // imshow("smooth_image", out_smooth); // waitKey(0); // #pragma endregion smooth return 0; }
09e9d74d127a8d9ac9e8dfc89e34ba3ece6b64f8.cu
#include <cuda_runtime.h> #include <opencv2/opencv.hpp> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/types_c.h> #include <stdio.h> using namespace std; using namespace cv; // cudaReadModeElementType does not support linear interpolation // cudaReadModeNormalizedFloat supports linear interpolation texture<uchar4, cudaTextureType2D, cudaReadModeNormalizedFloat> tex; __global__ void smooth_kernel(char *img, int width, int heigth, int channels) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //if using normalized coordinates float u = x / (float)width; float v = y / (float)heigth; //if cudaReadModeElementType is used, the uchar4 read cannot be converted to float float4 pixel = tex2D(tex, u, v); float4 left = tex2D(tex, u - 1, v); float4 right = tex2D(tex, u + 1, v); float4 top = tex2D(tex, u, v - 1); float4 botton = tex2D(tex, u, v + 1); char* pix = img + (v*width + u)*channels; pix[0] = (left.x + right.x + top.x + botton.x) / 4 * 255; pix[1] = (left.y + right.y + top.y + botton.y) / 4 * 255; pix[2] = (left.z + right.z + top.z + botton.z) / 4 * 255; pix[3] = 0; } __global__ void zoom_kernel(char *img, int width, int heigth, int channels) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // use normalized coordinates so scaling does not affect the sampling coordinates float u = x / (float)width; float v = y / (float)heigth; //if cudaReadModeElementType is used, the uchar4 read cannot be converted to float float4 pixel = tex2D(tex, u, v); // the coordinates here are float, which is why hardware interpolation is possible char* pix = img + (y*width + x)*channels; pix[0] = pixel.x * 255; pix[1] = pixel.y * 255; pix[2] = pixel.z * 255; pix[3] = 0; } int main(int argc, char **argv) { Mat img = imread("../demo.jpg", IMREAD_COLOR); if (img.empty()) return 1; Mat src = img(cv::Rect(0, 0, 256, 256)); cvtColor(src, src, CV_BGR2RGBA); int rows = src.rows; int cols = src.cols; int channels = src.channels(); int width = cols, height = rows, size = rows * cols*channels; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar4>(); cudaArray *cuArray; cudaMallocArray(&cuArray, &channelDesc, width, height); cudaMemcpyToArray(cuArray, 0, 0, src.data, size, cudaMemcpyHostToDevice); tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModeLinear; tex.normalized = true; cudaBindTextureToArray(tex, cuArray, channelDesc); #pragma region zoom float scale = 4.0; Mat zoom_h; resize(src, zoom_h, cv::Size(src.rows*scale, src.cols*scale), 0, 0, INTER_LINEAR); Mat zoom_d = Mat::zeros(rows*scale, cols*scale, CV_8UC4); char *dev_out = nullptr; cudaMalloc((void**)&dev_out, zoom_d.rows*zoom_d.cols*zoom_d.channels() * sizeof(uchar)); dim3 block(32, 8); dim3 grid((zoom_d.cols - 1) / block.x + 1, (zoom_d.rows - 1) / block.y + 1); zoom_kernel << <grid, block >> > (dev_out, zoom_d.cols, zoom_d.rows, zoom_d.channels()); cudaError_t err = cudaGetLastError(); cudaDeviceSynchronize(); cudaMemcpy(zoom_d.data, dev_out, zoom_d.rows*zoom_d.cols*zoom_d.channels() * sizeof(uchar), cudaMemcpyDeviceToHost); cudaUnbindTexture(tex); cudaFree(dev_out); cudaFreeArray(cuArray); imshow("orignal", src); imshow("zoom_h", zoom_h); imshow("zoom_d", zoom_d); waitKey(0); #pragma endregion zoom // #pragma region smooth // Mat out_smooth = Mat::zeros(rows, cols, CV_8UC4); // char *dev_out = nullptr; // cudaMalloc((void**)&dev_out, size); // // dim3 block(16, 16); // dim3 grid((width - 1) / block.x + 1, (height - 1) / block.y + 1); // smooth_kernel << <grid, block>> > (dev_out, width, height, channels); // cudaDeviceSynchronize(); // cudaMemcpy(out_smooth.data, dev_out, size,
cudaMemcpyDeviceToHost); // // cudaUnbindTexture(tex); // cudaFree(dev_out); // cudaFreeArray(cuArray); // // imshow("orignal", src); // imshow("smooth_image", out_smooth); // waitKey(0); // #pragma endregion smooth return 0; }
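Because tex.normalized = true, tex2D in this file expects coordinates in [0, 1), which is what lets zoom_kernel sample the 256x256 source at any output resolution with hardware linear filtering. It also means the u - 1 and u + 1 offsets in smooth_kernel step a whole texture width and simply clamp to the border; a one-texel neighbour offset in normalized coordinates is 1.0f/width. The sketch below is illustrative only, reuses the tex reference bound above, and writes those per-texel offsets out explicitly.

// Sketch only, assuming the same texture setup as above; names are illustrative.
__global__ void smooth_normalized_sketch(uchar4* out, int width, int height)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    const float du = 1.0f / width, dv = 1.0f / height;      // one source texel per step
    const float u = (x + 0.5f) * du, v = (y + 0.5f) * dv;   // sample at texel centres
    const float4 left = tex2D(tex, u - du, v);
    const float4 right = tex2D(tex, u + du, v);
    const float4 top = tex2D(tex, u, v - dv);
    const float4 bottom = tex2D(tex, u, v + dv);
    out[y * width + x] = make_uchar4(
        (unsigned char)((left.x + right.x + top.x + bottom.x) * 0.25f * 255.0f),
        (unsigned char)((left.y + right.y + top.y + bottom.y) * 0.25f * 255.0f),
        (unsigned char)((left.z + right.z + top.z + bottom.z) * 0.25f * 255.0f),
        0);
}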
129561dc5abbbde30cfa7c1c8b5d345ebe7d8a21.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2010-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <b40c/radix_sort/enactor.cuh> #include <nih/basic/types.h> #include <nih/basic/numbers.h> namespace nih { namespace cuda { namespace sah { void radix_sort( const uint32 n_elements, uint16* keys, uint32* values, uint16* keys_tmp, uint32* values_tmp) { b40c::util::PingPongStorage<uint16, uint32> sort_storage( keys, keys_tmp, values, values_tmp ); b40c::radix_sort::Enactor enactor; if (n_elements < 1024*1024) enactor.Sort<b40c::radix_sort::SMALL_SIZE>( sort_storage, n_elements ); else enactor.Sort<b40c::radix_sort::LARGE_SIZE>( sort_storage, n_elements ); if (sort_storage.selector) { hipMemcpy( keys, keys_tmp, sizeof(uint16)*n_elements, hipMemcpyDeviceToDevice ); hipMemcpy( values, values_tmp, sizeof(uint32)*n_elements, hipMemcpyDeviceToDevice ); } } void radix_sort( const uint32 n_elements, uint32* keys, uint32* values, uint32* keys_tmp, uint32* values_tmp) { b40c::util::PingPongStorage<uint32, uint32> sort_storage( keys, keys_tmp, values, values_tmp ); b40c::radix_sort::Enactor enactor; if (n_elements < 1024*1024) enactor.Sort<b40c::radix_sort::SMALL_SIZE>( sort_storage, n_elements ); else enactor.Sort<b40c::radix_sort::LARGE_SIZE>( sort_storage, n_elements ); if (sort_storage.selector) { hipMemcpy( keys, keys_tmp, sizeof(uint32)*n_elements, hipMemcpyDeviceToDevice ); hipMemcpy( values, values_tmp, sizeof(uint32)*n_elements, hipMemcpyDeviceToDevice ); } } void radix_sort( const uint32 n_elements, uint64* keys, uint32* values, uint64* keys_tmp, uint32* values_tmp) { b40c::util::PingPongStorage<uint64, uint32> sort_storage( keys, keys_tmp, values, values_tmp ); b40c::radix_sort::Enactor enactor; if (n_elements < 1024*1024) enactor.Sort<b40c::radix_sort::SMALL_SIZE>( sort_storage, n_elements ); else enactor.Sort<b40c::radix_sort::LARGE_SIZE>( sort_storage, n_elements ); if (sort_storage.selector) { hipMemcpy( keys, keys_tmp, sizeof(uint64)*n_elements, hipMemcpyDeviceToDevice ); hipMemcpy( values, values_tmp, sizeof(uint32)*n_elements, 
hipMemcpyDeviceToDevice ); } } } // namespace sah } // namespace cuda } // namespace nih
129561dc5abbbde30cfa7c1c8b5d345ebe7d8a21.cu
/* * Copyright (c) 2010-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <b40c/radix_sort/enactor.cuh> #include <nih/basic/types.h> #include <nih/basic/numbers.h> namespace nih { namespace cuda { namespace sah { void radix_sort( const uint32 n_elements, uint16* keys, uint32* values, uint16* keys_tmp, uint32* values_tmp) { b40c::util::PingPongStorage<uint16, uint32> sort_storage( keys, keys_tmp, values, values_tmp ); b40c::radix_sort::Enactor enactor; if (n_elements < 1024*1024) enactor.Sort<b40c::radix_sort::SMALL_SIZE>( sort_storage, n_elements ); else enactor.Sort<b40c::radix_sort::LARGE_SIZE>( sort_storage, n_elements ); if (sort_storage.selector) { cudaMemcpy( keys, keys_tmp, sizeof(uint16)*n_elements, cudaMemcpyDeviceToDevice ); cudaMemcpy( values, values_tmp, sizeof(uint32)*n_elements, cudaMemcpyDeviceToDevice ); } } void radix_sort( const uint32 n_elements, uint32* keys, uint32* values, uint32* keys_tmp, uint32* values_tmp) { b40c::util::PingPongStorage<uint32, uint32> sort_storage( keys, keys_tmp, values, values_tmp ); b40c::radix_sort::Enactor enactor; if (n_elements < 1024*1024) enactor.Sort<b40c::radix_sort::SMALL_SIZE>( sort_storage, n_elements ); else enactor.Sort<b40c::radix_sort::LARGE_SIZE>( sort_storage, n_elements ); if (sort_storage.selector) { cudaMemcpy( keys, keys_tmp, sizeof(uint32)*n_elements, cudaMemcpyDeviceToDevice ); cudaMemcpy( values, values_tmp, sizeof(uint32)*n_elements, cudaMemcpyDeviceToDevice ); } } void radix_sort( const uint32 n_elements, uint64* keys, uint32* values, uint64* keys_tmp, uint32* values_tmp) { b40c::util::PingPongStorage<uint64, uint32> sort_storage( keys, keys_tmp, values, values_tmp ); b40c::radix_sort::Enactor enactor; if (n_elements < 1024*1024) enactor.Sort<b40c::radix_sort::SMALL_SIZE>( sort_storage, n_elements ); else enactor.Sort<b40c::radix_sort::LARGE_SIZE>( sort_storage, n_elements ); if (sort_storage.selector) { cudaMemcpy( keys, keys_tmp, sizeof(uint64)*n_elements, cudaMemcpyDeviceToDevice ); cudaMemcpy( values, values_tmp, sizeof(uint32)*n_elements, cudaMemcpyDeviceToDevice ); } } } // namespace 
sah } // namespace cuda } // namespace nih
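b40c's PingPongStorage tracks which of the two key/value buffers ended up holding the sorted result via its selector field, which is why the wrappers above copy back to the primary buffers when selector is set. For comparison only, and not part of this codebase, the same ping-pong idea in b40c's successor CUB looks roughly like the sketch below; the function and buffer names are illustrative.

#include <cub/cub.cuh>
#include <cuda_runtime.h>

void cub_radix_sort_pairs_sketch(unsigned int* keys, unsigned int* keys_tmp,
                                 unsigned int* values, unsigned int* values_tmp,
                                 int n_elements)
{
    cub::DoubleBuffer<unsigned int> d_keys(keys, keys_tmp);
    cub::DoubleBuffer<unsigned int> d_values(values, values_tmp);

    void* d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;
    // First call only queries the required temporary storage size.
    cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, n_elements);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, n_elements);
    cudaFree(d_temp_storage);

    if (d_keys.Current() != keys) {  // result landed in the alternate buffers
        cudaMemcpy(keys, d_keys.Current(), sizeof(unsigned int) * n_elements, cudaMemcpyDeviceToDevice);
        cudaMemcpy(values, d_values.Current(), sizeof(unsigned int) * n_elements, cudaMemcpyDeviceToDevice);
    }
}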
26bb3718c3073d68df247adadac9e54358f91a7e.hip
// !!! This is a file automatically generated by hipify!!! /* Assumptions 1. Even length of filters */ /* Major code borrows: 1. rosetta fft code */ #include<stdio.h> #include<math.h> #include"stdio.h" #define N 16 //#define N 16384 //#define Nfil 256 #define BlkSize 1024 #define pi 3.1415 #define fc 50 #define Acheb 40 //db/decade #define k 20 #include "math.h" #include<hipfft.h> #include<stdlib.h> #include<complex.h> //#include<fftw.h> //Defined macros double PI; typedef double complex cplx; typedef struct{ double *sig_t; cplx *sig_f; }Filter; void init_fil(Filter *fil,int arr_len) { fil->sig_t = (double*)calloc(arr_len,sizeof(double)); fil->sig_f = (cplx*)calloc(arr_len,sizeof(cplx)); } void _fft(cplx buf[], cplx out[], int n, int step) { if (step < n) { _fft(out, buf, n, step * 2); _fft(out + step, buf + step, n, step * 2); for (int i = 0; i < n; i += 2 * step) { cplx t = cexp(-I * PI * i / n) * out[i + step]; buf[i / 2] = out[i] + t; buf[(i + n)/2] = out[i] - t; } } } void fft(cplx buf[], int n) { cplx out[n]; for (int i = 0; i < n; i++) out[i] = buf[i]; _fft(buf, out, n, 1); } void show(const char * s, cplx buf[]) { printf("%s", s); for (int i = 0; i < 16; i++) if (!cimag(buf[i])) printf("%g ", creal(buf[i])); else printf("(%g + j%g) ", creal(buf[i]), cimag(buf[i])); } void printUnsignedRange(int bytes) { int bits = 8 * bytes; unsigned long long to = (1LLU << (bits - 1)) + ((1LL << (bits - 1)) - 1);; printf(" 0 to %llu\n\n", to); } void shift_left_cplx(cplx *arr,int len,int shift,int offset) { memcpy(arr+len-offset,arr,shift*sizeof(cplx)); arr = arr+shift; } void shift_left_fl(float *arr,int len,int shift,int offset) { memcpy(arr+len-offset,arr,shift*sizeof(float)); arr = arr+shift; } unsigned int next_pow2(unsigned int n) { unsigned int p = 1; if (n && !(n & (n - 1))) return n; while (p < n) p <<= 1; return p; } double cheby_poly(double n, double x) { double res; if (fabs(x) <= 1) res = cos(n*acos(x)); else res = creal(ccosh(n*cacosh(x))); return res; } void cheby_win(double *out,cplx *fout,int Nfil,double tolerance) { //out = (float*)calloc(Nfil,sizeof(float)); int _temp = next_pow2(Nfil); cplx *temp = (cplx*)calloc(_temp,sizeof(cplx)); int idx, i; //float M, n, sum = 0, max=0; double tg = 1/tolerance;// /* 1/r term [2], 10^gamma [2] */ double expr = cosh(acosh(tg)/(Nfil-1)); //M = Nfil/2; printf("Nfil:%d x0:%lf _temp:%lu",Nfil,expr,_temp); //exit(0); for(idx=0; idx<Nfil; idx++) { temp[idx] = cheby_poly(Nfil-1,expr*cos((pi*idx)/Nfil))*tolerance;//cosf(2.0*n*pi*i/Nfil); fout[idx] = temp[idx]; printf("Filter:%lf\n",creal(temp[idx])); } for(idx=0;idx<_temp-Nfil;idx++) temp[idx]=0; //printf("\nCount:%d\n",idx); /* for(idx=0; idx<Nfil; idx++) out[idx] /= max; // normalise everything */ fft(temp,_temp); show("\ntempt:",temp); exit(0); shift_left_cplx(temp,Nfil,Nfil/2,Nfil/2); exit(0); show("\ntempsht:",temp); //exit(0); for(idx=0;idx<Nfil;idx++) out[idx] = creal(temp[idx]); } float sum_arr_fl(float *arr,int num) { float sum = 0; for(int i=0;i<num/4;i++) sum += arr[i]+arr[i+int(num/4.0)]+arr[i+int(num/2.0)]+arr[i+int(num*3/4)]; } float sum_arr_cplx(cplx *arr,int num) { cplx sum = 0; for(int i=0;i<num/4;i++) sum += arr[i]+arr[i+int(num/4.0)]+arr[i+int(num/2.0)]+arr[i+int(num*3/4)]; } void normalize_max(cplx *tempx2,int len,int max) { for(int i=0;i<len/2;i++) { tempx2[i] /= max; tempx2[i+len/2] /= max; } } void ifft_set(cplx *arr,int norm) { normalize_max(arr,norm,norm); cplx *arr2; arr2 = arr+1; cplx tmp = arr[0]; for(int i=0;i<norm-1;i++) { cplx tmp = arr[i]; arr[i] = arr[norm-1-i]; 
arr[norm-1-i] = tmp; } } void init_cplx_from_db(cplx *dest,int len,double *src_real,double *src_imag) { if(src_real==NULL) { for(int i=0;i<len;i++) dest[i] = I*src_imag[i]; } else if(src_imag==NULL) { for(int i=0;i<len;i++) dest[i] = src_real[i]; } else { for(int i=0;i<len;i++) dest[i] = src_real[i]+I*src_imag[i]; } } void init_db_from_cplx(double *dest,int len,cplx *src) { for(int i=0;i<len;i++) dest[i] = creal(src[i]); } void conv_window(Filter *fil,int nfil,int bin_size) { //fil.sig_t = (float*)calloc(Nan,sizeof(float)); //fil.sig_f = (cplx*)calloc(Nan,sizeof(cplx)); cplx *tempx = (cplx*)calloc(N,sizeof(cplx)); cplx *tempx2 = (cplx*)calloc(N,sizeof(cplx)); init_cplx_from_db(tempx,N,fil->sig_t,NULL); int width = N - nfil, bias = bin_size/2; int x=0; show("\nFFT first:\n",tempx); exit(0); memcpy(tempx+nfil,&x,width*sizeof(cplx)); // Time shifting and padding the signal shift_left_cplx(tempx,nfil,nfil/2,0); show("\nNo FFT:\n",tempx); fft(tempx,N); show("\nFFT tempx\n",tempx); float max = 0; cplx sum = sum_arr_cplx(tempx,nfil); for(int i=0;i<N;i++) { tempx2[(i+bias)%N] = sum; int intl = cabs(sum); //printf("(%lf,j%lf)",creal(sum),cimag(sum)); if(max<=intl) max = intl; sum = sum+tempx[(i+bias)%N]-tempx[i%N]; } normalize_max(tempx2,nfil,max); show("\ntempx2:\n",tempx2); cplx mag = 1; cplx step = cexp(-2*pi*I*(nfil/2)/N); for(int i=0;i<N;i++) { tempx2[i] *= mag; mag *= step; } tempx = tempx2; show("\nTempx\n",tempx); fft(tempx2,nfil); ifft_set(tempx2,nfil); init_db_from_cplx(fil->sig_t,nfil,tempx2); fil->sig_f = tempx; show("\n\nTEMPX\n",fil->sig_f); exit(0); //free(tempx); //free(tempx2); } /* void inner_loop() { } */ void generate_signal(double x[N],cplx xf[N],int fIdx[k]) { unsigned int i,n,idx; for(i=0;i<k;i++) { idx = rand()%N; x[idx] = 1; fIdx[i] = idx; xf[idx] = 1; } fft(xf,N); } void init_double(double* arr,int num) { for(int i=0;i<num;i++) arr[i]=0.0; } void init_cplx(cplx* arr,int num) { for(int i=0;i<num;i++) arr[i] = 0.0+I*0.0; } int main() { // System Constants int Bcst_loc = 2; int Bcst_est = 2; float lobefrac_loc =.003; float tolerance_loc = .00000001; int b_loc = 68;//Number of bins formed int B_loc = 128; int B_thresh = 20; int loc_loops = 7; int threshold_loops = 6; float lobefrac_est = .003; float tolerance_est = .00000001; int b_est = 79; int B_est = 128; int est_loops = 16; int Comb_loops = 10; //int k = 10; // Secondary Parameters int Nfil_loc = (1/pi)*(1.0/lobefrac_loc)*acosh(1.0/tolerance_loc); int Nfil_est = (1/pi)*(1.0/lobefrac_est)*acosh(1.0/tolerance_est); if(not(Nfil_loc%2))//Making filter element number odd Nfil_loc--; if(not(Nfil_est%2)) Nfil_est--; double x[N]; double *tfilLoc = (double*)calloc(Nfil_loc,sizeof(double)); double *tfilEst = (double*)calloc(Nfil_est,sizeof(double)); cplx xf[N]; cplx *ffilLoc = (cplx*)calloc(Nfil_loc,sizeof(cplx)); cplx *ffilEst = (cplx*)calloc(Nfil_est,sizeof(cplx)); int freqIdx[k],i,j; //Nfil_loc = 9; //Nfil_est = 9; //exit(0); //printUnsignedRange(sizeof(unsigned int)); printf("\nNfil: %d %d",Nfil_loc,Nfil_est); init_double(x,N); init_double(tfilLoc,Nfil_loc); init_double(tfilEst,Nfil_est); //exit(0); init_cplx(xf,N); init_cplx(ffilLoc,Nfil_loc); init_cplx(ffilEst,Nfil_est); show("\ntest:",xf); //x = (float*)calloc(N,sizeof(float)); //xf = (float*)calloc(N,sizeof(float)); generate_signal(x,xf,freqIdx); // Filter Design cheby_win(tfilLoc,ffilLoc,Nfil_loc,tolerance_loc); //cheby_win(tfilEst,ffilEst,Nfil_est,tolerance_est); printf("\nFilter:\n"); for(i=0;i<N;i++) { if(x[i]!=0.0) printf("\n%d:%f",i,x[i]); } Filter f_loc,f_est; 
init_fil(&f_loc,Nfil_loc); init_fil(&f_est,Nfil_est); f_loc.sig_f = ffilLoc; f_loc.sig_t = tfilLoc; f_est.sig_f = ffilLoc; f_est.sig_t = tfilLoc; conv_window(&f_loc,Nfil_loc,b_loc); show("Floc:",f_loc.sig_f); //show("\nFFT:\n",tfilLoc); //double complex z = 2+3*I; //printf("%f+I*%f",creal(z),cimag(z)); return 0; }
26bb3718c3073d68df247adadac9e54358f91a7e.cu
/* Assumptions 1. Even length of filters */ /* Major code borrows: 1. rosetta fft code */ #include<stdio.h> #include<math.h> #include"stdio.h" #define N 16 //#define N 16384 //#define Nfil 256 #define BlkSize 1024 #define pi 3.1415 #define fc 50 #define Acheb 40 //db/decade #define k 20 #include "math.h" #include<cufft.h> #include<stdlib.h> #include<complex.h> //#include<fftw.h> //Defined macros double PI; typedef double complex cplx; typedef struct{ double *sig_t; cplx *sig_f; }Filter; void init_fil(Filter *fil,int arr_len) { fil->sig_t = (double*)calloc(arr_len,sizeof(double)); fil->sig_f = (cplx*)calloc(arr_len,sizeof(cplx)); } void _fft(cplx buf[], cplx out[], int n, int step) { if (step < n) { _fft(out, buf, n, step * 2); _fft(out + step, buf + step, n, step * 2); for (int i = 0; i < n; i += 2 * step) { cplx t = cexp(-I * PI * i / n) * out[i + step]; buf[i / 2] = out[i] + t; buf[(i + n)/2] = out[i] - t; } } } void fft(cplx buf[], int n) { cplx out[n]; for (int i = 0; i < n; i++) out[i] = buf[i]; _fft(buf, out, n, 1); } void show(const char * s, cplx buf[]) { printf("%s", s); for (int i = 0; i < 16; i++) if (!cimag(buf[i])) printf("%g ", creal(buf[i])); else printf("(%g + j%g) ", creal(buf[i]), cimag(buf[i])); } void printUnsignedRange(int bytes) { int bits = 8 * bytes; unsigned long long to = (1LLU << (bits - 1)) + ((1LL << (bits - 1)) - 1);; printf(" 0 to %llu\n\n", to); } void shift_left_cplx(cplx *arr,int len,int shift,int offset) { memcpy(arr+len-offset,arr,shift*sizeof(cplx)); arr = arr+shift; } void shift_left_fl(float *arr,int len,int shift,int offset) { memcpy(arr+len-offset,arr,shift*sizeof(float)); arr = arr+shift; } unsigned int next_pow2(unsigned int n) { unsigned int p = 1; if (n && !(n & (n - 1))) return n; while (p < n) p <<= 1; return p; } double cheby_poly(double n, double x) { double res; if (fabs(x) <= 1) res = cos(n*acos(x)); else res = creal(ccosh(n*cacosh(x))); return res; } void cheby_win(double *out,cplx *fout,int Nfil,double tolerance) { //out = (float*)calloc(Nfil,sizeof(float)); int _temp = next_pow2(Nfil); cplx *temp = (cplx*)calloc(_temp,sizeof(cplx)); int idx, i; //float M, n, sum = 0, max=0; double tg = 1/tolerance;// /* 1/r term [2], 10^gamma [2] */ double expr = cosh(acosh(tg)/(Nfil-1)); //M = Nfil/2; printf("Nfil:%d x0:%lf _temp:%lu",Nfil,expr,_temp); //exit(0); for(idx=0; idx<Nfil; idx++) { temp[idx] = cheby_poly(Nfil-1,expr*cos((pi*idx)/Nfil))*tolerance;//cosf(2.0*n*pi*i/Nfil); fout[idx] = temp[idx]; printf("Filter:%lf\n",creal(temp[idx])); } for(idx=0;idx<_temp-Nfil;idx++) temp[idx]=0; //printf("\nCount:%d\n",idx); /* for(idx=0; idx<Nfil; idx++) out[idx] /= max; // normalise everything */ fft(temp,_temp); show("\ntempt:",temp); exit(0); shift_left_cplx(temp,Nfil,Nfil/2,Nfil/2); exit(0); show("\ntempsht:",temp); //exit(0); for(idx=0;idx<Nfil;idx++) out[idx] = creal(temp[idx]); } float sum_arr_fl(float *arr,int num) { float sum = 0; for(int i=0;i<num/4;i++) sum += arr[i]+arr[i+int(num/4.0)]+arr[i+int(num/2.0)]+arr[i+int(num*3/4)]; } float sum_arr_cplx(cplx *arr,int num) { cplx sum = 0; for(int i=0;i<num/4;i++) sum += arr[i]+arr[i+int(num/4.0)]+arr[i+int(num/2.0)]+arr[i+int(num*3/4)]; } void normalize_max(cplx *tempx2,int len,int max) { for(int i=0;i<len/2;i++) { tempx2[i] /= max; tempx2[i+len/2] /= max; } } void ifft_set(cplx *arr,int norm) { normalize_max(arr,norm,norm); cplx *arr2; arr2 = arr+1; cplx tmp = arr[0]; for(int i=0;i<norm-1;i++) { cplx tmp = arr[i]; arr[i] = arr[norm-1-i]; arr[norm-1-i] = tmp; } } void init_cplx_from_db(cplx *dest,int 
len,double *src_real,double *src_imag) { if(src_real==NULL) { for(int i=0;i<len;i++) dest[i] = I*src_imag[i]; } else if(src_imag==NULL) { for(int i=0;i<len;i++) dest[i] = src_real[i]; } else { for(int i=0;i<len;i++) dest[i] = src_real[i]+I*src_imag[i]; } } void init_db_from_cplx(double *dest,int len,cplx *src) { for(int i=0;i<len;i++) dest[i] = creal(src[i]); } void conv_window(Filter *fil,int nfil,int bin_size) { //fil.sig_t = (float*)calloc(Nan,sizeof(float)); //fil.sig_f = (cplx*)calloc(Nan,sizeof(cplx)); cplx *tempx = (cplx*)calloc(N,sizeof(cplx)); cplx *tempx2 = (cplx*)calloc(N,sizeof(cplx)); init_cplx_from_db(tempx,N,fil->sig_t,NULL); int width = N - nfil, bias = bin_size/2; int x=0; show("\nFFT first:\n",tempx); exit(0); memcpy(tempx+nfil,&x,width*sizeof(cplx)); // Time shifting and padding the signal shift_left_cplx(tempx,nfil,nfil/2,0); show("\nNo FFT:\n",tempx); fft(tempx,N); show("\nFFT tempx\n",tempx); float max = 0; cplx sum = sum_arr_cplx(tempx,nfil); for(int i=0;i<N;i++) { tempx2[(i+bias)%N] = sum; int intl = cabs(sum); //printf("(%lf,j%lf)",creal(sum),cimag(sum)); if(max<=intl) max = intl; sum = sum+tempx[(i+bias)%N]-tempx[i%N]; } normalize_max(tempx2,nfil,max); show("\ntempx2:\n",tempx2); cplx mag = 1; cplx step = cexp(-2*pi*I*(nfil/2)/N); for(int i=0;i<N;i++) { tempx2[i] *= mag; mag *= step; } tempx = tempx2; show("\nTempx\n",tempx); fft(tempx2,nfil); ifft_set(tempx2,nfil); init_db_from_cplx(fil->sig_t,nfil,tempx2); fil->sig_f = tempx; show("\n\nTEMPX\n",fil->sig_f); exit(0); //free(tempx); //free(tempx2); } /* void inner_loop() { } */ void generate_signal(double x[N],cplx xf[N],int fIdx[k]) { unsigned int i,n,idx; for(i=0;i<k;i++) { idx = rand()%N; x[idx] = 1; fIdx[i] = idx; xf[idx] = 1; } fft(xf,N); } void init_double(double* arr,int num) { for(int i=0;i<num;i++) arr[i]=0.0; } void init_cplx(cplx* arr,int num) { for(int i=0;i<num;i++) arr[i] = 0.0+I*0.0; } int main() { // System Constants int Bcst_loc = 2; int Bcst_est = 2; float lobefrac_loc =.003; float tolerance_loc = .00000001; int b_loc = 68;//Number of bins formed int B_loc = 128; int B_thresh = 20; int loc_loops = 7; int threshold_loops = 6; float lobefrac_est = .003; float tolerance_est = .00000001; int b_est = 79; int B_est = 128; int est_loops = 16; int Comb_loops = 10; //int k = 10; // Secondary Parameters int Nfil_loc = (1/pi)*(1.0/lobefrac_loc)*acosh(1.0/tolerance_loc); int Nfil_est = (1/pi)*(1.0/lobefrac_est)*acosh(1.0/tolerance_est); if(not(Nfil_loc%2))//Making filter element number odd Nfil_loc--; if(not(Nfil_est%2)) Nfil_est--; double x[N]; double *tfilLoc = (double*)calloc(Nfil_loc,sizeof(double)); double *tfilEst = (double*)calloc(Nfil_est,sizeof(double)); cplx xf[N]; cplx *ffilLoc = (cplx*)calloc(Nfil_loc,sizeof(cplx)); cplx *ffilEst = (cplx*)calloc(Nfil_est,sizeof(cplx)); int freqIdx[k],i,j; //Nfil_loc = 9; //Nfil_est = 9; //exit(0); //printUnsignedRange(sizeof(unsigned int)); printf("\nNfil: %d %d",Nfil_loc,Nfil_est); init_double(x,N); init_double(tfilLoc,Nfil_loc); init_double(tfilEst,Nfil_est); //exit(0); init_cplx(xf,N); init_cplx(ffilLoc,Nfil_loc); init_cplx(ffilEst,Nfil_est); show("\ntest:",xf); //x = (float*)calloc(N,sizeof(float)); //xf = (float*)calloc(N,sizeof(float)); generate_signal(x,xf,freqIdx); // Filter Design cheby_win(tfilLoc,ffilLoc,Nfil_loc,tolerance_loc); //cheby_win(tfilEst,ffilEst,Nfil_est,tolerance_est); printf("\nFilter:\n"); for(i=0;i<N;i++) { if(x[i]!=0.0) printf("\n%d:%f",i,x[i]); } Filter f_loc,f_est; init_fil(&f_loc,Nfil_loc); init_fil(&f_est,Nfil_est); f_loc.sig_f = 
ffilLoc; f_loc.sig_t = tfilLoc; f_est.sig_f = ffilLoc; f_est.sig_t = tfilLoc; conv_window(&f_loc,Nfil_loc,b_loc); show("Floc:",f_loc.sig_f); //show("\nFFT:\n",tfilLoc); //double complex z = 2+3*I; //printf("%f+I*%f",creal(z),cimag(z)); return 0; }
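Two defects in this pair are worth flagging. The file-scope double PI used by _fft() is declared but never assigned, so it is zero-initialized and every twiddle factor cexp(-I*PI*i/n) evaluates to 1; and sum_arr_fl()/sum_arr_cplx() compute a sum but never return it, which is undefined behaviour for a non-void function. A minimal sketch of the missing PI initialization, assuming the globals declared above, is:

/* Minimal fix sketch: call this once at the top of main(), before any
   cheby_win()/generate_signal()/fft() call. File-scope doubles are
   zero-initialized, so without it PI stays 0.0 and the butterflies in _fft()
   never rotate. */
static void init_pi(void)
{
    PI = atan2(1.0, 1.0) * 4.0;  /* pi in double precision; the pi macro above is only 3.1415 */
}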
25bc81db6533279598eac2280608b2bf9800b893.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fastertransformer/th_op/utils.h" namespace torch_ext { // modified from TensorFlow's implementation of tf.contrib.seq2seq.gather_tree __global__ void gather_tree_kernel(const int batch_size, const int max_time, const int beam_width, const int end_token, const int* step_ids, const int* parent_ids, const int* max_sequence_lengths, int* beams) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size * beam_width; i += gridDim.x * blockDim.x) { const int batch = i / beam_width; const int beam = i % beam_width; const int max_seq_len_b = min(max_time, __ldg(max_sequence_lengths + batch)); if (max_seq_len_b <= 0) { continue; } #define GET_IX(time_ix, beam_ix) (batch_size * beam_width * (time_ix) + beam_width * batch + (beam_ix)) const int initial_beam_ix = GET_IX(max_seq_len_b - 1, beam); beams[initial_beam_ix] = __ldg(step_ids + initial_beam_ix); int parent = __ldg(parent_ids + initial_beam_ix); bool found_bad = false; for (int level = max_seq_len_b - 2; level >= 0; --level) { const int level_beam_ix = GET_IX(level, beam); const int level_parent_ix = GET_IX(level, parent); if (parent < 0 || parent > beam_width) { beams[level_beam_ix] = -1; parent = -1; found_bad = true; } else { beams[level_beam_ix] = __ldg(step_ids + level_parent_ix); parent = __ldg(parent_ids + level_parent_ix); } } // Not necessary when using a BeamSearchDecoder, but necessary // when a user feeds in possibly broken trajectory (i.e., non-eos // entries in a beam following eos entries). if (!found_bad) { bool finished = false; for (int time = 0; time < max_seq_len_b; ++time) { const int level_beam_ix = GET_IX(time, beam); if (finished) { beams[level_beam_ix] = end_token; } else if (beams[level_beam_ix] == end_token) { finished = true; } } } #undef GET_IX } } void gather_tree_kernel_launcher(int max_time, int batch_size, int beam_width, int* step_ids, int* parent_ids, int* max_sequence_lengths, int end_token, int* beams, hipStream_t stream) { int batchbeam = batch_size * beam_width; dim3 grid(1), block(batchbeam); // though decoder do not support > 1024 for now if (batchbeam > 1024) { grid.x = ceil(batch_size * beam_width / 1024.); block.x = 1024; } hipLaunchKernelGGL(( gather_tree_kernel), dim3(grid), dim3(block), 0, stream, batch_size, max_time, beam_width, end_token, step_ids, parent_ids, max_sequence_lengths, beams); } } // namespace torch_ext
25bc81db6533279598eac2280608b2bf9800b893.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fastertransformer/th_op/utils.h" namespace torch_ext { // modified from TensorFlow's implementation of tf.contrib.seq2seq.gather_tree __global__ void gather_tree_kernel(const int batch_size, const int max_time, const int beam_width, const int end_token, const int* step_ids, const int* parent_ids, const int* max_sequence_lengths, int* beams) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size * beam_width; i += gridDim.x * blockDim.x) { const int batch = i / beam_width; const int beam = i % beam_width; const int max_seq_len_b = min(max_time, __ldg(max_sequence_lengths + batch)); if (max_seq_len_b <= 0) { continue; } #define GET_IX(time_ix, beam_ix) (batch_size * beam_width * (time_ix) + beam_width * batch + (beam_ix)) const int initial_beam_ix = GET_IX(max_seq_len_b - 1, beam); beams[initial_beam_ix] = __ldg(step_ids + initial_beam_ix); int parent = __ldg(parent_ids + initial_beam_ix); bool found_bad = false; for (int level = max_seq_len_b - 2; level >= 0; --level) { const int level_beam_ix = GET_IX(level, beam); const int level_parent_ix = GET_IX(level, parent); if (parent < 0 || parent > beam_width) { beams[level_beam_ix] = -1; parent = -1; found_bad = true; } else { beams[level_beam_ix] = __ldg(step_ids + level_parent_ix); parent = __ldg(parent_ids + level_parent_ix); } } // Not necessary when using a BeamSearchDecoder, but necessary // when a user feeds in possibly broken trajectory (i.e., non-eos // entries in a beam following eos entries). if (!found_bad) { bool finished = false; for (int time = 0; time < max_seq_len_b; ++time) { const int level_beam_ix = GET_IX(time, beam); if (finished) { beams[level_beam_ix] = end_token; } else if (beams[level_beam_ix] == end_token) { finished = true; } } } #undef GET_IX } } void gather_tree_kernel_launcher(int max_time, int batch_size, int beam_width, int* step_ids, int* parent_ids, int* max_sequence_lengths, int end_token, int* beams, cudaStream_t stream) { int batchbeam = batch_size * beam_width; dim3 grid(1), block(batchbeam); // though decoder do not support > 1024 for now if (batchbeam > 1024) { grid.x = ceil(batch_size * beam_width / 1024.); block.x = 1024; } gather_tree_kernel<<<grid, block, 0, stream>>>(batch_size, max_time, beam_width, end_token, step_ids, parent_ids, max_sequence_lengths, beams); } } // namespace torch_ext
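The pair above exposes gather_tree_kernel_launcher, which walks parent_ids backwards to reconstruct beam-search paths. A minimal host-side driver sketch; the toy sizes and buffer names are hypothetical and not taken from FasterTransformer itself:

#include <cuda_runtime.h>
#include <vector>

namespace torch_ext {
void gather_tree_kernel_launcher(int max_time, int batch_size, int beam_width,
                                 int* step_ids, int* parent_ids,
                                 int* max_sequence_lengths, int end_token,
                                 int* beams, cudaStream_t stream);
}

int main() {
  const int max_time = 4, batch_size = 1, beam_width = 2, end_token = 0;
  const int n = max_time * batch_size * beam_width;
  std::vector<int> h_steps(n, 1), h_parents(n, 0), h_lens(batch_size, max_time), h_beams(n, 0);

  int *d_steps, *d_parents, *d_lens, *d_beams;
  cudaMalloc(&d_steps, n * sizeof(int));
  cudaMalloc(&d_parents, n * sizeof(int));
  cudaMalloc(&d_lens, batch_size * sizeof(int));
  cudaMalloc(&d_beams, n * sizeof(int));
  cudaMemcpy(d_steps, h_steps.data(), n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_parents, h_parents.data(), n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_lens, h_lens.data(), batch_size * sizeof(int), cudaMemcpyHostToDevice);

  // Reconstruct the beams on the default stream, then copy the result back.
  torch_ext::gather_tree_kernel_launcher(max_time, batch_size, beam_width,
                                         d_steps, d_parents, d_lens,
                                         end_token, d_beams, 0);
  cudaMemcpy(h_beams.data(), d_beams, n * sizeof(int), cudaMemcpyDeviceToHost);

  cudaFree(d_steps); cudaFree(d_parents); cudaFree(d_lens); cudaFree(d_beams);
  return 0;
}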
bcdf59ead22e753cb9b5eea4a7df4466bbd65be3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "upsample_impl.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _UpampleNearestKernel(const size_t rank, const int64_t* input_pitches, const fast_divmod* output_div_pitches, const fast_divmod* scales_div, const T* input_data, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; CUDA_LONG output_index = id; int div, mod; for (int dim = 0; dim < rank; ++dim) { output_div_pitches[dim].divmod(output_index, div, mod); output_index = mod; if (scales_div[dim].d_ != 1 && div > 0) { scales_div[dim].divmod(div, div, mod); } input_index += input_pitches[dim] * div; } output_data[id] = input_data[input_index]; } template <typename T> __global__ void _UpampleBilinearKernel(const int64_t input_dim2, const int64_t* input_pitches, const fast_divmod* output_div_pitches, const fast_divmod* scales_div, const T* input_data, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; // For bilinear mode, scales[0]=scales[1]=1 int mod; int index_of_dim0, index_of_dim1, index_of_dim2, index_of_dim3; output_div_pitches[0].divmod(id, index_of_dim0, mod); output_div_pitches[1].divmod(mod, index_of_dim1, mod); output_div_pitches[2].divmod(mod, index_of_dim2, mod); index_of_dim3 = mod; int index_of_input_dim2, index_of_input_dim3, x_offset, y_offset; scales_div[2].divmod(index_of_dim2, index_of_input_dim2, y_offset); scales_div[3].divmod(index_of_dim3, index_of_input_dim3, x_offset); input_index = index_of_dim0 * input_pitches[0] + index_of_dim1 * input_pitches[1] + index_of_input_dim2 * input_pitches[2] + index_of_input_dim3; T x00 = input_data[input_index]; T x10, x01, x11; bool end_of_dim2 = false; if (index_of_input_dim2 == (input_dim2 - 1)) { // It's the end in dimension 2 x01 = x00; end_of_dim2 = true; } else { x01 = input_data[input_index + input_pitches[2]]; } if (index_of_input_dim3 == (input_pitches[2] - 1)) { // It's the end in dimension 3 x10 = x00; x11 = x01; } else { x10 = input_data[input_index + 1]; x11 = end_of_dim2 ? 
x10 : input_data[input_index + input_pitches[2] + 1]; } T y_offset_T = static_cast<T>(y_offset); T x_offset_T = static_cast<T>(x_offset); T scales_div2_T = static_cast<T>(scales_div[2].d_); T scales_div3_T = static_cast<T>(scales_div[3].d_); T y0 = x00 + static_cast<T>(y_offset_T * (x01 - x00) / scales_div2_T); T y1 = x10 + static_cast<T>(y_offset_T * (x11 - x10) / scales_div2_T); output_data[id] = y0 + static_cast<T>(x_offset_T * (y1 - y0) / scales_div3_T); } template <typename T> void UpampleImpl(const onnxruntime::UpsampleMode upsample_mode, const size_t rank, const int64_t input_dim2, const int64_t* input_pitches, const fast_divmod* output_div_pitches, const fast_divmod* scales_div, const T* input_data, T* output_data, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); if (onnxruntime::UpsampleMode::NN == upsample_mode) { hipLaunchKernelGGL(( _UpampleNearestKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, rank, input_pitches, output_div_pitches, scales_div, input_data, output_data, N); } else if (onnxruntime::UpsampleMode::LINEAR == upsample_mode) { hipLaunchKernelGGL(( _UpampleBilinearKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, input_dim2, input_pitches, output_div_pitches, scales_div, input_data, output_data, N); } } #define SPECIALIZED_IMPL(T) \ template void UpampleImpl<T>(const onnxruntime::UpsampleMode upsample_mode, \ const size_t rank, \ const int64_t input_dim2, \ const int64_t* input_pitches, \ const fast_divmod* output_div_pitches, \ const fast_divmod* scales_div, \ const T* input_data, \ T* output_data, \ const size_t N); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) SPECIALIZED_IMPL(half) SPECIALIZED_IMPL(int32_t) } // namespace cuda } // namespace onnxruntime
bcdf59ead22e753cb9b5eea4a7df4466bbd65be3.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "upsample_impl.h" namespace onnxruntime { namespace cuda { template <typename T> __global__ void _UpampleNearestKernel(const size_t rank, const int64_t* input_pitches, const fast_divmod* output_div_pitches, const fast_divmod* scales_div, const T* input_data, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; CUDA_LONG output_index = id; int div, mod; for (int dim = 0; dim < rank; ++dim) { output_div_pitches[dim].divmod(output_index, div, mod); output_index = mod; if (scales_div[dim].d_ != 1 && div > 0) { scales_div[dim].divmod(div, div, mod); } input_index += input_pitches[dim] * div; } output_data[id] = input_data[input_index]; } template <typename T> __global__ void _UpampleBilinearKernel(const int64_t input_dim2, const int64_t* input_pitches, const fast_divmod* output_div_pitches, const fast_divmod* scales_div, const T* input_data, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; // For bilinear mode, scales[0]=scales[1]=1 int mod; int index_of_dim0, index_of_dim1, index_of_dim2, index_of_dim3; output_div_pitches[0].divmod(id, index_of_dim0, mod); output_div_pitches[1].divmod(mod, index_of_dim1, mod); output_div_pitches[2].divmod(mod, index_of_dim2, mod); index_of_dim3 = mod; int index_of_input_dim2, index_of_input_dim3, x_offset, y_offset; scales_div[2].divmod(index_of_dim2, index_of_input_dim2, y_offset); scales_div[3].divmod(index_of_dim3, index_of_input_dim3, x_offset); input_index = index_of_dim0 * input_pitches[0] + index_of_dim1 * input_pitches[1] + index_of_input_dim2 * input_pitches[2] + index_of_input_dim3; T x00 = input_data[input_index]; T x10, x01, x11; bool end_of_dim2 = false; if (index_of_input_dim2 == (input_dim2 - 1)) { // It's the end in dimension 2 x01 = x00; end_of_dim2 = true; } else { x01 = input_data[input_index + input_pitches[2]]; } if (index_of_input_dim3 == (input_pitches[2] - 1)) { // It's the end in dimension 3 x10 = x00; x11 = x01; } else { x10 = input_data[input_index + 1]; x11 = end_of_dim2 ? 
x10 : input_data[input_index + input_pitches[2] + 1]; } T y_offset_T = static_cast<T>(y_offset); T x_offset_T = static_cast<T>(x_offset); T scales_div2_T = static_cast<T>(scales_div[2].d_); T scales_div3_T = static_cast<T>(scales_div[3].d_); T y0 = x00 + static_cast<T>(y_offset_T * (x01 - x00) / scales_div2_T); T y1 = x10 + static_cast<T>(y_offset_T * (x11 - x10) / scales_div2_T); output_data[id] = y0 + static_cast<T>(x_offset_T * (y1 - y0) / scales_div3_T); } template <typename T> void UpampleImpl(const onnxruntime::UpsampleMode upsample_mode, const size_t rank, const int64_t input_dim2, const int64_t* input_pitches, const fast_divmod* output_div_pitches, const fast_divmod* scales_div, const T* input_data, T* output_data, const size_t N) { int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); if (onnxruntime::UpsampleMode::NN == upsample_mode) { _UpampleNearestKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( rank, input_pitches, output_div_pitches, scales_div, input_data, output_data, N); } else if (onnxruntime::UpsampleMode::LINEAR == upsample_mode) { _UpampleBilinearKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( input_dim2, input_pitches, output_div_pitches, scales_div, input_data, output_data, N); } } #define SPECIALIZED_IMPL(T) \ template void UpampleImpl<T>(const onnxruntime::UpsampleMode upsample_mode, \ const size_t rank, \ const int64_t input_dim2, \ const int64_t* input_pitches, \ const fast_divmod* output_div_pitches, \ const fast_divmod* scales_div, \ const T* input_data, \ T* output_data, \ const size_t N); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) SPECIALIZED_IMPL(half) SPECIALIZED_IMPL(int32_t) } // namespace cuda } // namespace onnxruntime
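Per output pixel, _UpampleBilinearKernel performs two 1-D interpolations along dimension 2 followed by one along dimension 3. A CPU-side sketch of that arithmetic, with a hypothetical helper that mirrors the kernel's variable names:

#include <cstdio>

// x00/x01 are the two neighbours along dim 2, x10/x11 their counterparts one step
// along dim 3; offsets and scales correspond to y_offset/x_offset and scales_div.
float upsample_bilinear_one(float x00, float x01, float x10, float x11,
                            int y_offset, int x_offset,
                            int scale_h, int scale_w) {
  float y0 = x00 + y_offset * (x01 - x00) / scale_h;  // interpolate along dim 2
  float y1 = x10 + y_offset * (x11 - x10) / scale_h;
  return y0 + x_offset * (y1 - y0) / scale_w;         // then along dim 3
}

int main() {
  // 2x upsampling between neighbours 1,2,3,4: the (1,1) sub-pixel is 2.5.
  printf("%f\n", upsample_bilinear_one(1.f, 2.f, 3.f, 4.f, 1, 1, 2, 2));
  return 0;
}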
5ee80ee78881d50827249a055c5908c8b8e2c42f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <fcntl.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <string.h> #include "spec.h" #include "gradient.h" #include "convert.h" #include "stencilMVM.h" #include "windows_acml.h" #define TEXTON32 1 #define TEXTON64 2 uint radii[4]={3,5,10,20}; float** filters; void savgol_filter(float* filt, int d, float inra, float inrb, float theta) { int k=1; assert(d>0); assert(k>=1 && k<=d+1); float ra=max(inra,1.5); float rb=max(inrb,1.5); float ira2=1.0/(ra*ra); float irb2 = 1.0/(rb*rb); int wr = int(floor(max(ra,rb))+0.5); int wd = 2*wr+1; float sint = sin(theta); float cost = cos(theta); //float* filt = new float[wd*wd]; memset(filt,0,sizeof(float)*wd*wd); float* xx= new float[2*d+1]; memset(xx,0,sizeof(float)*(2*d+1)); for(int u=-wr;u<=wr;u++) { for(int v=-wr;v<=wr;v++) { float ai=-u*sint+v*cost; float bi=u*cost+v*sint; if(ai*ai*ira2 + bi*bi*irb2 > 1) { continue; } for(int i=0;i<2*d+1;i++) { xx[i]=xx[i]+pow(ai,i); } } } float *A=new float[(d+1)*(d+1)]; for(int i=0;i<d+1;i++) { for(int j=0;j<d+1;j++) { A[i*(d+1)+j] = xx[i+j]; } } //float *invA=new float[(d+1)*(d+1)]; //A = inv(A) int info; int* ipiv = new int[d+1]; float* work=new float[d+1]; int kk = d+1; /*sgetrf*/LAPACK_sgetrf(&kk, &kk, A, &kk, ipiv, &info); /*sgetri*/LAPACK_sgetri(&kk, A, &kk, ipiv, work, &kk, &info); for(int u=-wr;u<=wr;u++) { for(int v=-wr;v<=wr;v++) { float ai=-u*sint+v*cost; float bi=u*cost+v*sint; if(ai*ai*ira2 + bi*bi*irb2 > 1) { continue; } for(int i=0;i<d+1;i++) { filt[(v+wr)*wd+u+wr] += A[i]*pow(ai,i); //doing only k=1 } } } delete [] A; delete [] ipiv; delete [] work; delete [] xx; } void construct_parabola_filters(uint number, uint* radii, uint norients) { filters = (float**)malloc(sizeof(float*) * number); for(int filter = 0; filter < number; filter++) { int filter_radius; int filter_length; //filter_radius = 3; filter_radius = radii[filter]; filter_length = 2*filter_radius+1; filters[filter] = (float*)malloc(filter_length*filter_length*norients*sizeof(float)); float* currentFilter = filters[filter]; for (int o=0; o<norients; o++) { savgol_filter(currentFilter+filter_length*filter_length*o, 2, filter_radius, float(filter_radius)/4.0f, M_PI/2-o*M_PI/8); /*if(o==1) printf("Filter # %d\n", o); for(int x=0;x<filter_length*filter_length;x++) { printf("%1.4f ", filters3[filter_length*filter_length*o+x]); if((x+1)%filter_length==0) printf("\n"); } printf("\n"); float *f=savgol_filter(f, 2, 3, 0.75, M_PI/2-o*M_PI/8); for(int x=0;x<filter_length*filter_length;x++) { printf("%1.4f ", f[x]); if((x+1)%filter_length==0) printf("\n"); }*/ } } } void writeGrad(char* file, int width, int height, int norients, int nscale, int cuePitchInFloats, float* hostGradient) { int fd; fd = open(file, O_CREAT|O_WRONLY|O_TRUNC, 0666); write(fd, &width, sizeof(int)); write(fd, &height, sizeof(int)); write(fd, &norients, sizeof(int)); write(fd, &nscale, sizeof(int)); for(int scale = 0; scale < nscale; scale++) { for(int orient = 0; orient < norients; orient++) { write(fd, &hostGradient[(scale*norients + orient) * cuePitchInFloats], width*height*sizeof(float)); } } close(fd); } void writeHist(char* file, int width, int height, int norients, int nscale, int x, int y, int orient, int scale, int nbins, float* hostDebug) { int fd; int outWidth = nbins; int outHeight = 2; fd = open(file, O_CREAT|O_WRONLY|O_TRUNC, 0666); write(fd, &outWidth, sizeof(int)); write(fd, &outHeight, sizeof(int)); write(fd, &hostDebug[(y*width + x)*nbins*2], 
nbins*2*sizeof(float)); close(fd); } void bg(uint width, uint height, uint norients, uint nscale, uint* bgRadii, float** filters, float* devL, float** p_devBg, int cuePitchInFloats, int textonChoice) { hipMalloc((void**)p_devBg, sizeof(float) * cuePitchInFloats * norients * nscale); float* devBg = *p_devBg; uint nbins = 25; float sigma = 0.1; bool blur = true; uint border = GRADIENT_BORDER/*30*/; uint borderWidth = width + 2 * border; uint borderHeight = height + 2 * border; float* devGradients = gradients(devL, nbins, blur, sigma, bgRadii, textonChoice); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); for(int scale = 0; scale < nscale; scale++) { int radius = bgRadii[scale]; int length = 2*radius + 1; float* fDevGra = &devGradients[borderWidth * borderHeight * norients * scale]; gpu_parabola(norients, width, height, border, fDevGra, radius, length, filters[scale], devBg + cuePitchInFloats * norients * scale, cuePitchInFloats); } cutStopTimer(cueTimer); printf(">+< \tBgsmooth: | %f | ms\n", cutGetTimerValue(cueTimer)); } void cg(uint width, uint height, uint norients, uint nscale, uint* cgRadii, float** filters, float* devInput, float** p_devCg, int cuePitchInFloats, int textonChoice) { hipMalloc((void**)p_devCg, sizeof(float) * cuePitchInFloats * norients * nscale); float* devCg = *p_devCg; uint nbins = 25; float sigma = 0.05; bool blur = true; uint border = GRADIENT_BORDER/*30*/; uint borderWidth = width + 2 * border; uint borderHeight = height + 2 * border; float* devGradients = gradients(devInput, nbins, blur, sigma, cgRadii, textonChoice); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); for(int scale = 0; scale < nscale; scale++) { int radius = cgRadii[scale]; int length = 2*radius + 1; gpu_parabola(norients, width, height, border, &devGradients[borderWidth * borderHeight * norients * scale], radius, length, filters[scale], devCg + cuePitchInFloats * norients * scale, cuePitchInFloats); } cutStopTimer(cueTimer); printf(">+< \tCgsmooth: | %f | ms\n", cutGetTimerValue(cueTimer)); } void tg(uint width, uint height, uint norients, uint nscale, uint* tgRadii, float** filters, int* devTextons, float** p_devTg, int cuePitchInFloats, int textonChoice) { hipMalloc((void**)p_devTg, sizeof(float) * cuePitchInFloats * norients * nscale); float* devTg = *p_devTg; uint nbins = 32; if (TEXTON64 == textonChoice) nbins = 64; float sigma = 0; bool blur = false; uint border = GRADIENT_BORDER/*30*/; uint borderWidth = width + 2 * border; uint borderHeight = height + 2 * border; float* devGradients = gradients(devTextons, nbins, blur, sigma, tgRadii, textonChoice); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); for(int scale = 0; scale < nscale; scale++) { int radius = tgRadii[scale]; int length = 2*radius + 1; gpu_parabola(norients, width, height, border, &devGradients[borderWidth * borderHeight * norients * scale], radius, length, filters[scale], devTg + cuePitchInFloats * norients * scale, cuePitchInFloats); } cutStopTimer(cueTimer); printf(">+< \tTgsmooth: | %f | ms\n", cutGetTimerValue(cueTimer)); } void localCues(int width, int height, float* devL, float* devA, float* devB, int* devTextons, float** devBg, float** devCga, float** devCgb, float** devTg, int* p_cuePitchInFloats, int p_nTextonChoice) { printf("Beginning Local cues computation\n"); uint norients = 8; uint nscale = 3; uint border = GRADIENT_BORDER/*10*/; uint maxbins = 64; construct_parabola_filters(4, radii, norients); int nPixels = width * height; int cuePitchInFloats = 
findPitchInFloats(nPixels); *p_cuePitchInFloats = cuePitchInFloats; initializeGradients(width, height, border, maxbins, norients, nscale, p_nTextonChoice); gpu_parabola_init(norients, width, height, border); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); bg(width, height, norients, nscale, &radii[0], &filters[0], devL, devBg, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tBg: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cutStartTimer(cueTimer); cg(width, height, norients, nscale, &radii[1], &filters[1], devA, devCga, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tCga: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cutStartTimer(cueTimer); cg(width, height, norients, nscale, &radii[1], &filters[1], devB, devCgb, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tCgb: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cutStartTimer(cueTimer); tg(width, height, norients, nscale, &radii[1], &filters[1], devTextons, devTg, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tTg: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); hipDeviceSynchronize(); finalizeGradients(); gpu_parabola_cleanup(); printf("Completed Local cues\n"); } /* int main(int argc, char** argv) */ /* { */ /* norients = 8; */ /* nbins = 25; */ /* file = "data/L.dat"; */ /* read_dims(); */ /* read_parabola_filters(); */ /* gpu_gradient_init(norients, width, height, border); */ /* gpu_parabola_init(norients, width, height, border); */ /* int ts1, ts2; */ /* ts1 = timestamp(); */ /* bg(); */ /* cga(); */ /* cgb(); */ /* tg(); */ /* ts2 = timestamp(); */ /* printf("gpu_time = %fms\n", ((double)ts2-(double)ts1)/1000); */ /* gpu_gradient_cleanup(); */ /* gpu_parabola_cleanup(); */ /* } */
5ee80ee78881d50827249a055c5908c8b8e2c42f.cu
#include <stdio.h> #include <fcntl.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <string.h> #include "spec.h" #include "gradient.h" #include "convert.h" #include "stencilMVM.h" #include "windows_acml.h" #define TEXTON32 1 #define TEXTON64 2 uint radii[4]={3,5,10,20}; float** filters; void savgol_filter(float* filt, int d, float inra, float inrb, float theta) { int k=1; assert(d>0); assert(k>=1 && k<=d+1); float ra=max(inra,1.5); float rb=max(inrb,1.5); float ira2=1.0/(ra*ra); float irb2 = 1.0/(rb*rb); int wr = int(floor(max(ra,rb))+0.5); int wd = 2*wr+1; float sint = sin(theta); float cost = cos(theta); //float* filt = new float[wd*wd]; memset(filt,0,sizeof(float)*wd*wd); float* xx= new float[2*d+1]; memset(xx,0,sizeof(float)*(2*d+1)); for(int u=-wr;u<=wr;u++) { for(int v=-wr;v<=wr;v++) { float ai=-u*sint+v*cost; float bi=u*cost+v*sint; if(ai*ai*ira2 + bi*bi*irb2 > 1) { continue; } for(int i=0;i<2*d+1;i++) { xx[i]=xx[i]+pow(ai,i); } } } float *A=new float[(d+1)*(d+1)]; for(int i=0;i<d+1;i++) { for(int j=0;j<d+1;j++) { A[i*(d+1)+j] = xx[i+j]; } } //float *invA=new float[(d+1)*(d+1)]; //A = inv(A) int info; int* ipiv = new int[d+1]; float* work=new float[d+1]; int kk = d+1; /*sgetrf*/LAPACK_sgetrf(&kk, &kk, A, &kk, ipiv, &info); /*sgetri*/LAPACK_sgetri(&kk, A, &kk, ipiv, work, &kk, &info); for(int u=-wr;u<=wr;u++) { for(int v=-wr;v<=wr;v++) { float ai=-u*sint+v*cost; float bi=u*cost+v*sint; if(ai*ai*ira2 + bi*bi*irb2 > 1) { continue; } for(int i=0;i<d+1;i++) { filt[(v+wr)*wd+u+wr] += A[i]*pow(ai,i); //doing only k=1 } } } delete [] A; delete [] ipiv; delete [] work; delete [] xx; } void construct_parabola_filters(uint number, uint* radii, uint norients) { filters = (float**)malloc(sizeof(float*) * number); for(int filter = 0; filter < number; filter++) { int filter_radius; int filter_length; //filter_radius = 3; filter_radius = radii[filter]; filter_length = 2*filter_radius+1; filters[filter] = (float*)malloc(filter_length*filter_length*norients*sizeof(float)); float* currentFilter = filters[filter]; for (int o=0; o<norients; o++) { savgol_filter(currentFilter+filter_length*filter_length*o, 2, filter_radius, float(filter_radius)/4.0f, M_PI/2-o*M_PI/8); /*if(o==1) printf("Filter # %d\n", o); for(int x=0;x<filter_length*filter_length;x++) { printf("%1.4f ", filters3[filter_length*filter_length*o+x]); if((x+1)%filter_length==0) printf("\n"); } printf("\n"); float *f=savgol_filter(f, 2, 3, 0.75, M_PI/2-o*M_PI/8); for(int x=0;x<filter_length*filter_length;x++) { printf("%1.4f ", f[x]); if((x+1)%filter_length==0) printf("\n"); }*/ } } } void writeGrad(char* file, int width, int height, int norients, int nscale, int cuePitchInFloats, float* hostGradient) { int fd; fd = open(file, O_CREAT|O_WRONLY|O_TRUNC, 0666); write(fd, &width, sizeof(int)); write(fd, &height, sizeof(int)); write(fd, &norients, sizeof(int)); write(fd, &nscale, sizeof(int)); for(int scale = 0; scale < nscale; scale++) { for(int orient = 0; orient < norients; orient++) { write(fd, &hostGradient[(scale*norients + orient) * cuePitchInFloats], width*height*sizeof(float)); } } close(fd); } void writeHist(char* file, int width, int height, int norients, int nscale, int x, int y, int orient, int scale, int nbins, float* hostDebug) { int fd; int outWidth = nbins; int outHeight = 2; fd = open(file, O_CREAT|O_WRONLY|O_TRUNC, 0666); write(fd, &outWidth, sizeof(int)); write(fd, &outHeight, sizeof(int)); write(fd, &hostDebug[(y*width + x)*nbins*2], nbins*2*sizeof(float)); close(fd); } void bg(uint width, uint height, 
uint norients, uint nscale, uint* bgRadii, float** filters, float* devL, float** p_devBg, int cuePitchInFloats, int textonChoice) { cudaMalloc((void**)p_devBg, sizeof(float) * cuePitchInFloats * norients * nscale); float* devBg = *p_devBg; uint nbins = 25; float sigma = 0.1; bool blur = true; uint border = GRADIENT_BORDER/*30*/; uint borderWidth = width + 2 * border; uint borderHeight = height + 2 * border; float* devGradients = gradients(devL, nbins, blur, sigma, bgRadii, textonChoice); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); for(int scale = 0; scale < nscale; scale++) { int radius = bgRadii[scale]; int length = 2*radius + 1; float* fDevGra = &devGradients[borderWidth * borderHeight * norients * scale]; gpu_parabola(norients, width, height, border, fDevGra, radius, length, filters[scale], devBg + cuePitchInFloats * norients * scale, cuePitchInFloats); } cutStopTimer(cueTimer); printf(">+< \tBgsmooth: | %f | ms\n", cutGetTimerValue(cueTimer)); } void cg(uint width, uint height, uint norients, uint nscale, uint* cgRadii, float** filters, float* devInput, float** p_devCg, int cuePitchInFloats, int textonChoice) { cudaMalloc((void**)p_devCg, sizeof(float) * cuePitchInFloats * norients * nscale); float* devCg = *p_devCg; uint nbins = 25; float sigma = 0.05; bool blur = true; uint border = GRADIENT_BORDER/*30*/; uint borderWidth = width + 2 * border; uint borderHeight = height + 2 * border; float* devGradients = gradients(devInput, nbins, blur, sigma, cgRadii, textonChoice); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); for(int scale = 0; scale < nscale; scale++) { int radius = cgRadii[scale]; int length = 2*radius + 1; gpu_parabola(norients, width, height, border, &devGradients[borderWidth * borderHeight * norients * scale], radius, length, filters[scale], devCg + cuePitchInFloats * norients * scale, cuePitchInFloats); } cutStopTimer(cueTimer); printf(">+< \tCgsmooth: | %f | ms\n", cutGetTimerValue(cueTimer)); } void tg(uint width, uint height, uint norients, uint nscale, uint* tgRadii, float** filters, int* devTextons, float** p_devTg, int cuePitchInFloats, int textonChoice) { cudaMalloc((void**)p_devTg, sizeof(float) * cuePitchInFloats * norients * nscale); float* devTg = *p_devTg; uint nbins = 32; if (TEXTON64 == textonChoice) nbins = 64; float sigma = 0; bool blur = false; uint border = GRADIENT_BORDER/*30*/; uint borderWidth = width + 2 * border; uint borderHeight = height + 2 * border; float* devGradients = gradients(devTextons, nbins, blur, sigma, tgRadii, textonChoice); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); for(int scale = 0; scale < nscale; scale++) { int radius = tgRadii[scale]; int length = 2*radius + 1; gpu_parabola(norients, width, height, border, &devGradients[borderWidth * borderHeight * norients * scale], radius, length, filters[scale], devTg + cuePitchInFloats * norients * scale, cuePitchInFloats); } cutStopTimer(cueTimer); printf(">+< \tTgsmooth: | %f | ms\n", cutGetTimerValue(cueTimer)); } void localCues(int width, int height, float* devL, float* devA, float* devB, int* devTextons, float** devBg, float** devCga, float** devCgb, float** devTg, int* p_cuePitchInFloats, int p_nTextonChoice) { printf("Beginning Local cues computation\n"); uint norients = 8; uint nscale = 3; uint border = GRADIENT_BORDER/*10*/; uint maxbins = 64; construct_parabola_filters(4, radii, norients); int nPixels = width * height; int cuePitchInFloats = findPitchInFloats(nPixels); *p_cuePitchInFloats = cuePitchInFloats; 
initializeGradients(width, height, border, maxbins, norients, nscale, p_nTextonChoice); gpu_parabola_init(norients, width, height, border); uint cueTimer; cutCreateTimer(&cueTimer); cutStartTimer(cueTimer); bg(width, height, norients, nscale, &radii[0], &filters[0], devL, devBg, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tBg: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cutStartTimer(cueTimer); cg(width, height, norients, nscale, &radii[1], &filters[1], devA, devCga, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tCga: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cutStartTimer(cueTimer); cg(width, height, norients, nscale, &radii[1], &filters[1], devB, devCgb, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tCgb: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cutStartTimer(cueTimer); tg(width, height, norients, nscale, &radii[1], &filters[1], devTextons, devTg, cuePitchInFloats, p_nTextonChoice); cutStopTimer(cueTimer); printf(">+< \tTg: | %f | ms\n", cutGetTimerValue(cueTimer)); cutResetTimer(cueTimer); cudaThreadSynchronize(); finalizeGradients(); gpu_parabola_cleanup(); printf("Completed Local cues\n"); } /* int main(int argc, char** argv) */ /* { */ /* norients = 8; */ /* nbins = 25; */ /* file = "data/L.dat"; */ /* read_dims(); */ /* read_parabola_filters(); */ /* gpu_gradient_init(norients, width, height, border); */ /* gpu_parabola_init(norients, width, height, border); */ /* int ts1, ts2; */ /* ts1 = timestamp(); */ /* bg(); */ /* cga(); */ /* cgb(); */ /* tg(); */ /* ts2 = timestamp(); */ /* printf("gpu_time = %fms\n", ((double)ts2-(double)ts1)/1000); */ /* gpu_gradient_cleanup(); */ /* gpu_parabola_cleanup(); */ /* } */
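The cue timings above use the cut*Timer helpers from the long-deprecated CUTIL library. On a current toolkit the same measurement is normally done with CUDA events; a sketch in which the work() callback merely stands in for a bg/cg/tg stage and is hypothetical:

#include <cuda_runtime.h>
#include <cstdio>

void time_section(void (*work)()) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  work();                        // the stage being profiled, e.g. one cue computation
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);    // wait for the recorded work to finish
  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  printf(">+< \tstage: | %f | ms\n", ms);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
}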
7e87811a6f38bee07b1954dfc094d02c3fde0d15.hip
// !!! This is a file automatically generated by hipify!!! #include "mex.h" #include "matrix.h" #include "gpu/mxGPUArray.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <rocblas.h> #include <cassert> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/device_functions.h> #define VL_CUDA_NUM_THREADS 1024 #define maxStreams 16 // maximum number of streams used for concurrency using namespace std; inline size_t divideAndRoundUp(size_t a, size_t b) { return (a + b - 1) / b; } __global__ void setToOnes(float *data, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; // 1D grid of 1D blocks if (index < size) data[index] = 1; } void createStreams(hipStream_t *streams, int N) { for (int i = 0; i < N; i++) { hipStreamCreate(&(streams[i])); } } void destroyStreams(hipStream_t *streams, int N) { for (int i = 0; i < N; i++) { hipStreamDestroy(streams[i]); } } bool speedup_sum(float *output, float const *input, unsigned int const *nodeSize, unsigned int const Nin, unsigned int const Nout, unsigned int const Fin) { assert(output); assert(input); hipError_t hipError_t; hipblasStatus_t cublasError; hipblasHandle_t handle; hipStream_t *streams = NULL; bool status = true; hipblasCreate(&handle); streams = (hipStream_t *)malloc(maxStreams * sizeof(hipStream_t)); createStreams(streams, maxStreams); float *allOnesMemory = NULL; hipError_t = hipMalloc((void **)&allOnesMemory, Nin * sizeof(float)); if (hipError_t != hipSuccess) { goto done; } setToOnes << < divideAndRoundUp(Nin, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS, 0, streams[0] >> > ((float *)allOnesMemory, Nin); hipError_t = hipDeviceSynchronize(); if (hipError_t != hipSuccess) { goto done; } // parent-wise memcpy or summation float alpha, beta; alpha = 1; beta = 0; int count; count = 0; for (int iter = 0; iter < Nout; ++iter) { unsigned int convSize = nodeSize[iter]; ptrdiff_t inputOffset = Fin * count; ptrdiff_t outputOffset = Fin * iter; if (convSize == 1) // copy if for-loop end with convSize=1 nodes { hipError_t = hipMemcpyAsync((float *)output + outputOffset, (float const *)input + inputOffset, Fin * sizeof(float), hipMemcpyDeviceToDevice, streams[iter % maxStreams]); if (hipError_t != hipSuccess) { goto done; } } else { hipblasSetStream(handle, streams[iter % maxStreams]); cublasError = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, Fin, 1, convSize, &alpha, (float const *)input + inputOffset, Fin, (float const *)allOnesMemory, 1, &beta, (float *)output + outputOffset, Fin); if (cublasError != HIPBLAS_STATUS_SUCCESS) { goto done; } } count += convSize; } hipError_t = hipDeviceSynchronize(); if (hipError_t != hipSuccess) { goto done; } done: if (hipError_t != hipSuccess || cublasError != HIPBLAS_STATUS_SUCCESS) { status = false; } if (allOnesMemory) hipFree(allOnesMemory); if (streams) destroyStreams(streams, maxStreams); hipblasDestroy(handle); return status; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { float const *input; float *output; unsigned int Fin, Nin, Nout; unsigned int const *nodeSize; bool err = false; char *errMsg; mxGPUArray const *dev_input; mxGPUArray *dev_output; mwSize const *dims; mwSize dims_4D[] = { 1, 1, 1, 1 }; mwSize ndim; mxInitGPU(); /* Initialize the MathWorks GPU API. 
*/ /* -------------------------------------------------------------- */ /* Check the input arguments */ /* -------------------------------------------------------------- */ if (nrhs < 2) { mexErrMsgTxt("Not enough input arguments."); } if (nrhs > 2) { mexErrMsgTxt("Too many inputs."); } // get pointer to gpuArray input dev_input = mxGPUCreateFromMxArray(prhs[0]); if (mxGPUGetClassID(dev_input) == mxSINGLE_CLASS) { input = (float const *)mxGPUGetDataReadOnly(dev_input); } else { mexErrMsgTxt("Only single format of network input is supported."); } // get pointer to cpuArray map if (mxGetClassID(prhs[1]) == mxUINT32_CLASS) { nodeSize = (unsigned int const *)mxGetData(prhs[1]); Nout = mxGetNumberOfElements(prhs[1]); } else { mexErrMsgTxt("Only unsigned int format of nodeSize is supported."); } // parse the network input ndim = mxGPUGetNumberOfDimensions(dev_input); dims = mxGPUGetDimensions(dev_input); if (ndim<3 || ndim>4) { mexErrMsgTxt("The network input must be a 4D matrix."); } else { Fin = dims[0] * dims[1] * dims[2]; Nin = (ndim == 3) ? 1 : dims[3]; } /* -------------------------------------------------------------- */ /* Do the work*/ /* -------------------------------------------------------------- */ dims_4D[2] = Fin; dims_4D[3] = Nout; dev_output = mxGPUCreateGPUArray(4, dims_4D, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); output = (float *)mxGPUGetData(dev_output); bool status = speedup_sum(output, input, nodeSize, Nin, Nout, Fin); if (status) { plhs[0] = mxGPUCreateMxArrayOnGPU(dev_output); } else { err = true; errMsg = "The speed-up summation is failed."; } mxGPUDestroyGPUArray(dev_input); mxGPUDestroyGPUArray(dev_output); if (err) mexErrMsgTxt(errMsg); }
7e87811a6f38bee07b1954dfc094d02c3fde0d15.cu
#include "mex.h" #include "matrix.h" #include "gpu/mxGPUArray.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cublas_v2.h> #include <cassert> #include <stdlib.h> #include <cuda.h> #include <device_functions.h> #define VL_CUDA_NUM_THREADS 1024 #define maxStreams 16 // maximum number of streams used for concurrency using namespace std; inline size_t divideAndRoundUp(size_t a, size_t b) { return (a + b - 1) / b; } __global__ void setToOnes(float *data, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x; // 1D grid of 1D blocks if (index < size) data[index] = 1; } void createStreams(cudaStream_t *streams, int N) { for (int i = 0; i < N; i++) { cudaStreamCreate(&(streams[i])); } } void destroyStreams(cudaStream_t *streams, int N) { for (int i = 0; i < N; i++) { cudaStreamDestroy(streams[i]); } } bool speedup_sum(float *output, float const *input, unsigned int const *nodeSize, unsigned int const Nin, unsigned int const Nout, unsigned int const Fin) { assert(output); assert(input); cudaError_t cudaError; cublasStatus_t cublasError; cublasHandle_t handle; cudaStream_t *streams = NULL; bool status = true; cublasCreate(&handle); streams = (cudaStream_t *)malloc(maxStreams * sizeof(cudaStream_t)); createStreams(streams, maxStreams); float *allOnesMemory = NULL; cudaError = cudaMalloc((void **)&allOnesMemory, Nin * sizeof(float)); if (cudaError != cudaSuccess) { goto done; } setToOnes << < divideAndRoundUp(Nin, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS, 0, streams[0] >> > ((float *)allOnesMemory, Nin); cudaError = cudaDeviceSynchronize(); if (cudaError != cudaSuccess) { goto done; } // parent-wise memcpy or summation float alpha, beta; alpha = 1; beta = 0; int count; count = 0; for (int iter = 0; iter < Nout; ++iter) { unsigned int convSize = nodeSize[iter]; ptrdiff_t inputOffset = Fin * count; ptrdiff_t outputOffset = Fin * iter; if (convSize == 1) // copy if for-loop end with convSize=1 nodes { cudaError = cudaMemcpyAsync((float *)output + outputOffset, (float const *)input + inputOffset, Fin * sizeof(float), cudaMemcpyDeviceToDevice, streams[iter % maxStreams]); if (cudaError != cudaSuccess) { goto done; } } else { cublasSetStream(handle, streams[iter % maxStreams]); cublasError = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, Fin, 1, convSize, &alpha, (float const *)input + inputOffset, Fin, (float const *)allOnesMemory, 1, &beta, (float *)output + outputOffset, Fin); if (cublasError != CUBLAS_STATUS_SUCCESS) { goto done; } } count += convSize; } cudaError = cudaDeviceSynchronize(); if (cudaError != cudaSuccess) { goto done; } done: if (cudaError != cudaSuccess || cublasError != CUBLAS_STATUS_SUCCESS) { status = false; } if (allOnesMemory) cudaFree(allOnesMemory); if (streams) destroyStreams(streams, maxStreams); cublasDestroy(handle); return status; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { float const *input; float *output; unsigned int Fin, Nin, Nout; unsigned int const *nodeSize; bool err = false; char *errMsg; mxGPUArray const *dev_input; mxGPUArray *dev_output; mwSize const *dims; mwSize dims_4D[] = { 1, 1, 1, 1 }; mwSize ndim; mxInitGPU(); /* Initialize the MathWorks GPU API. 
*/ /* -------------------------------------------------------------- */ /* Check the input arguments */ /* -------------------------------------------------------------- */ if (nrhs < 2) { mexErrMsgTxt("Not enough input arguments."); } if (nrhs > 2) { mexErrMsgTxt("Too many inputs."); } // get pointer to gpuArray input dev_input = mxGPUCreateFromMxArray(prhs[0]); if (mxGPUGetClassID(dev_input) == mxSINGLE_CLASS) { input = (float const *)mxGPUGetDataReadOnly(dev_input); } else { mexErrMsgTxt("Only single format of network input is supported."); } // get pointer to cpuArray map if (mxGetClassID(prhs[1]) == mxUINT32_CLASS) { nodeSize = (unsigned int const *)mxGetData(prhs[1]); Nout = mxGetNumberOfElements(prhs[1]); } else { mexErrMsgTxt("Only unsigned int format of nodeSize is supported."); } // parse the network input ndim = mxGPUGetNumberOfDimensions(dev_input); dims = mxGPUGetDimensions(dev_input); if (ndim<3 || ndim>4) { mexErrMsgTxt("The network input must be a 4D matrix."); } else { Fin = dims[0] * dims[1] * dims[2]; Nin = (ndim == 3) ? 1 : dims[3]; } /* -------------------------------------------------------------- */ /* Do the work*/ /* -------------------------------------------------------------- */ dims_4D[2] = Fin; dims_4D[3] = Nout; dev_output = mxGPUCreateGPUArray(4, dims_4D, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE); output = (float *)mxGPUGetData(dev_output); bool status = speedup_sum(output, input, nodeSize, Nin, Nout, Fin); if (status) { plhs[0] = mxGPUCreateMxArrayOnGPU(dev_output); } else { err = true; errMsg = "The speed-up summation is failed."; } mxGPUDestroyGPUArray(dev_input); mxGPUDestroyGPUArray(dev_output); if (err) mexErrMsgTxt(errMsg); }
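speedup_sum reduces each parent's convSize child columns with a GEMM against an all-ones vector, i.e. out(Fin x 1) = in(Fin x convSize) * ones. A self-contained sketch of the same ones-vector trick using cublasSgemv, with toy sizes and hypothetical buffer names:

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

int main() {
  const int Fin = 3, k = 4;                         // 3 features, 4 child nodes
  std::vector<float> h_in(Fin * k), h_ones(k, 1.f), h_out(Fin);
  for (int i = 0; i < Fin * k; ++i) h_in[i] = (float)i;  // column-major Fin x k

  float *d_in, *d_ones, *d_out;
  cudaMalloc(&d_in, Fin * k * sizeof(float));
  cudaMalloc(&d_ones, k * sizeof(float));
  cudaMalloc(&d_out, Fin * sizeof(float));
  cudaMemcpy(d_in, h_in.data(), Fin * k * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_ones, h_ones.data(), k * sizeof(float), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.f, beta = 0.f;
  // y = A * ones: the row sums of A, i.e. the sum over all child columns.
  cublasSgemv(handle, CUBLAS_OP_N, Fin, k, &alpha, d_in, Fin, d_ones, 1, &beta, d_out, 1);

  cudaMemcpy(h_out.data(), d_out, Fin * sizeof(float), cudaMemcpyDeviceToHost);
  printf("%f %f %f\n", h_out[0], h_out[1], h_out[2]);  // 18 22 26
  cublasDestroy(handle);
  cudaFree(d_in); cudaFree(d_ones); cudaFree(d_out);
  return 0;
}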
9af7e93f016551d74cc6b8c799d1e87d60dbb95d.hip
// !!! This is a file automatically generated by hipify!!! #include "TaskExecutor.hpp" #include "kernels/ActivationKernel.cuh" #include "kernels/AdamKernel.cuh" #include "kernels/SoftmaxKernel.cuh" #include "kernels/BackwardDeltaKernel.cuh" #include "kernels/GradientIncrementKernel.cuh" #include "kernels/MatrixFillKernel.cuh" #include "kernels/MatrixScaleKernel.cuh" #include "kernels/TransposeKernel.cuh" #include "kernels/WeightedIncrementKernel.cuh" #include "kernels/ErrorMeasureKernel.cuh" #include "kernels/TargetValuesKernel.cuh" #include "Util.cuh" #include <hip/hip_runtime.h> using namespace rnn; using namespace rnn::cuda; struct TaskExecutor::TaskExecutorImpl { hipStream_t stream; TaskExecutorImpl() { hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); } ~TaskExecutorImpl() { hipStreamDestroy(stream); } void Execute(const Task &t) { hipError_t err; switch(t.type) { case TaskType::LAYER_ACTIVATION: if (t.data.layerActivationData.activation == LayerActivation::SOFTMAX) { SoftmaxKernel::Apply(t.data.layerActivationData.layer, stream); } else { ActivationKernel::Apply( t.data.layerActivationData.layer, t.data.layerActivationData.activation, stream); } return; case TaskType::ERROR_MEASURE: ErrorMeasureKernel::Apply(t.data.errorMeasureData.networkOutput, t.data.errorMeasureData.targetOutput, t.data.errorMeasureData.deltaMask, t.data.errorMeasureData.outputLayer, stream); return; case TaskType::PROPAGATE_DELTA: BackwardDeltaKernel::Apply(t.data.propagateDeltaData.nextDelta, t.data.propagateDeltaData.transposedWeights, t.data.propagateDeltaData.connection, t.data.propagateDeltaData.outDelta, stream); return; case TaskType::GRADIENT_INCREMENT: GradientIncrementKernel::Apply(t.data.gradientIncrementData.layerDeltas, t.data.gradientIncrementData.connection, t.data.gradientIncrementData.outGradient, stream); return; case TaskType::FILL_MATRIX: MatrixFillKernel::Apply(t.data.fillMatrixData.target, t.data.fillMatrixData.value, stream); return; case TaskType::SCALE_MATRIX: MatrixScaleKernel::Apply(t.data.scaleMatrixData.target, t.data.scaleMatrixData.scale, stream); return; case TaskType::TRANSPOSE_MATRIX: TransposeKernel::Apply(t.data.transposeMatrixData.src, t.data.transposeMatrixData.dst, stream); return; case TaskType::FORWARD_INCREMENT: WeightedIncrementKernel::Apply(t.data.forwardIncrementData.layerWeights, t.data.forwardIncrementData.input, t.data.forwardIncrementData.output, stream); return; case TaskType::TARGET_QVALUES: TargetValuesKernel::Apply(t.data.targetQValuesData.nextTargetActivation, t.data.targetQValuesData.nextActionMask, t.data.targetQValuesData.batchRewards, t.data.targetQValuesData.discountFactor, t.data.targetQValuesData.useOnlyReward, t.data.targetQValuesData.outTargetValue, stream); return; case TaskType::ADAM_UPDATE: AdamKernel::UpdateMomentumAndRMS( t.data.adamUpdateData.gradient, t.data.adamUpdateData.momentum, t.data.adamUpdateData.rms, t.data.adamUpdateData.beta1, t.data.adamUpdateData.beta2, stream); return; case TaskType::ADAM_INCREMENT: AdamKernel::UpdateWeightsWithAdam( t.data.adamIncrementData.weights, t.data.adamIncrementData.momentum, t.data.adamIncrementData.rms, t.data.adamIncrementData.beta1, t.data.adamIncrementData.beta2, t.data.adamIncrementData.lr, t.data.adamIncrementData.epsilon, stream); return; case TaskType::COPY_MATRIX_D2H: err = hipMemcpy2DAsync( t.data.copyMatrixD2HData.dst.data, t.data.copyMatrixD2HData.dst.cols * sizeof(float), t.data.copyMatrixD2HData.src.data, t.data.copyMatrixD2HData.src.pitch, t.data.copyMatrixD2HData.src.cols * 
sizeof(float), t.data.copyMatrixD2HData.src.rows, hipMemcpyDeviceToHost, stream); CheckError(err); return; case TaskType::COPY_MATRIX_H2D: err = hipMemcpy2DAsync( t.data.copyMatrixH2DData.dst.data, t.data.copyMatrixH2DData.dst.pitch, t.data.copyMatrixH2DData.src.data, t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.rows, hipMemcpyHostToDevice, stream); CheckError(err); return; case TaskType::COPY_MATRIX_D2D: err = hipMemcpy2DAsync( t.data.copyMatrixD2DData.dst.data, t.data.copyMatrixD2DData.dst.pitch, t.data.copyMatrixD2DData.src.data, t.data.copyMatrixD2DData.src.pitch, t.data.copyMatrixD2DData.src.cols * sizeof(float), t.data.copyMatrixD2DData.src.rows, hipMemcpyDeviceToDevice, stream); CheckError(err); return; default: assert(false); } } }; TaskExecutor::TaskExecutor() : impl(new TaskExecutorImpl()) {} TaskExecutor::~TaskExecutor() = default; void TaskExecutor::Execute(const Task &task) { impl->Execute(task); }
9af7e93f016551d74cc6b8c799d1e87d60dbb95d.cu
#include "TaskExecutor.hpp" #include "kernels/ActivationKernel.cuh" #include "kernels/AdamKernel.cuh" #include "kernels/SoftmaxKernel.cuh" #include "kernels/BackwardDeltaKernel.cuh" #include "kernels/GradientIncrementKernel.cuh" #include "kernels/MatrixFillKernel.cuh" #include "kernels/MatrixScaleKernel.cuh" #include "kernels/TransposeKernel.cuh" #include "kernels/WeightedIncrementKernel.cuh" #include "kernels/ErrorMeasureKernel.cuh" #include "kernels/TargetValuesKernel.cuh" #include "Util.cuh" #include <cuda_runtime.h> using namespace rnn; using namespace rnn::cuda; struct TaskExecutor::TaskExecutorImpl { cudaStream_t stream; TaskExecutorImpl() { cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); } ~TaskExecutorImpl() { cudaStreamDestroy(stream); } void Execute(const Task &t) { cudaError_t err; switch(t.type) { case TaskType::LAYER_ACTIVATION: if (t.data.layerActivationData.activation == LayerActivation::SOFTMAX) { SoftmaxKernel::Apply(t.data.layerActivationData.layer, stream); } else { ActivationKernel::Apply( t.data.layerActivationData.layer, t.data.layerActivationData.activation, stream); } return; case TaskType::ERROR_MEASURE: ErrorMeasureKernel::Apply(t.data.errorMeasureData.networkOutput, t.data.errorMeasureData.targetOutput, t.data.errorMeasureData.deltaMask, t.data.errorMeasureData.outputLayer, stream); return; case TaskType::PROPAGATE_DELTA: BackwardDeltaKernel::Apply(t.data.propagateDeltaData.nextDelta, t.data.propagateDeltaData.transposedWeights, t.data.propagateDeltaData.connection, t.data.propagateDeltaData.outDelta, stream); return; case TaskType::GRADIENT_INCREMENT: GradientIncrementKernel::Apply(t.data.gradientIncrementData.layerDeltas, t.data.gradientIncrementData.connection, t.data.gradientIncrementData.outGradient, stream); return; case TaskType::FILL_MATRIX: MatrixFillKernel::Apply(t.data.fillMatrixData.target, t.data.fillMatrixData.value, stream); return; case TaskType::SCALE_MATRIX: MatrixScaleKernel::Apply(t.data.scaleMatrixData.target, t.data.scaleMatrixData.scale, stream); return; case TaskType::TRANSPOSE_MATRIX: TransposeKernel::Apply(t.data.transposeMatrixData.src, t.data.transposeMatrixData.dst, stream); return; case TaskType::FORWARD_INCREMENT: WeightedIncrementKernel::Apply(t.data.forwardIncrementData.layerWeights, t.data.forwardIncrementData.input, t.data.forwardIncrementData.output, stream); return; case TaskType::TARGET_QVALUES: TargetValuesKernel::Apply(t.data.targetQValuesData.nextTargetActivation, t.data.targetQValuesData.nextActionMask, t.data.targetQValuesData.batchRewards, t.data.targetQValuesData.discountFactor, t.data.targetQValuesData.useOnlyReward, t.data.targetQValuesData.outTargetValue, stream); return; case TaskType::ADAM_UPDATE: AdamKernel::UpdateMomentumAndRMS( t.data.adamUpdateData.gradient, t.data.adamUpdateData.momentum, t.data.adamUpdateData.rms, t.data.adamUpdateData.beta1, t.data.adamUpdateData.beta2, stream); return; case TaskType::ADAM_INCREMENT: AdamKernel::UpdateWeightsWithAdam( t.data.adamIncrementData.weights, t.data.adamIncrementData.momentum, t.data.adamIncrementData.rms, t.data.adamIncrementData.beta1, t.data.adamIncrementData.beta2, t.data.adamIncrementData.lr, t.data.adamIncrementData.epsilon, stream); return; case TaskType::COPY_MATRIX_D2H: err = cudaMemcpy2DAsync( t.data.copyMatrixD2HData.dst.data, t.data.copyMatrixD2HData.dst.cols * sizeof(float), t.data.copyMatrixD2HData.src.data, t.data.copyMatrixD2HData.src.pitch, t.data.copyMatrixD2HData.src.cols * sizeof(float), t.data.copyMatrixD2HData.src.rows, 
cudaMemcpyDeviceToHost, stream); CheckError(err); return; case TaskType::COPY_MATRIX_H2D: err = cudaMemcpy2DAsync( t.data.copyMatrixH2DData.dst.data, t.data.copyMatrixH2DData.dst.pitch, t.data.copyMatrixH2DData.src.data, t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.rows, cudaMemcpyHostToDevice, stream); CheckError(err); return; case TaskType::COPY_MATRIX_D2D: err = cudaMemcpy2DAsync( t.data.copyMatrixD2DData.dst.data, t.data.copyMatrixD2DData.dst.pitch, t.data.copyMatrixD2DData.src.data, t.data.copyMatrixD2DData.src.pitch, t.data.copyMatrixD2DData.src.cols * sizeof(float), t.data.copyMatrixD2DData.src.rows, cudaMemcpyDeviceToDevice, stream); CheckError(err); return; default: assert(false); } } }; TaskExecutor::TaskExecutor() : impl(new TaskExecutorImpl()) {} TaskExecutor::~TaskExecutor() = default; void TaskExecutor::Execute(const Task &task) { impl->Execute(task); }
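All three COPY_MATRIX cases above are cudaMemcpy2DAsync calls that differ only in which side carries a real row pitch: device matrices are pitched, host matrices are tightly packed. A minimal sketch of that pattern with hypothetical sizes:

#include <cuda_runtime.h>
#include <vector>

int main() {
  const int rows = 4, cols = 5;
  std::vector<float> host(rows * cols, 1.f);      // packed rows on the host

  float *dev = nullptr;
  size_t pitch = 0;                               // device rows may be padded
  cudaMallocPitch(&dev, &pitch, cols * sizeof(float), rows);

  // host -> device: source pitch is the packed row width, destination uses `pitch`.
  cudaMemcpy2DAsync(dev, pitch,
                    host.data(), cols * sizeof(float),
                    cols * sizeof(float), rows,
                    cudaMemcpyHostToDevice, 0);
  cudaStreamSynchronize(0);

  cudaFree(dev);
  return 0;
}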
a85afa686188dc759b37a7c04d26c90ab9a08837.hip
// !!! This is a file automatically generated by hipify!!! // Position-Based Real-Time Simulation of Large Crowds // Copyright (c) 2020, Tomer Weiss // // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, // OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER // IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Original author: Tomer Weiss <http://www.cs.ucla.edu/~tweiss> #include <cuda_gl_interop.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <string> #include <time.h> #include <unistd.h> #include <unordered_map> #include <vector> #include <stdio.h> #include <GLFW/glfw3.h> #include <OpenGL/gl.h> #include <OpenGL/glu.h> #include <glm/glm.hpp> #include <sys/time.h> #ifdef __HIPCC__ #define CUDA_CALLABLE __host__ __device__ #else #define CUDA_CALLABLE #endif #define OUT_PATH "../out/" /* ========================= */ /* Simulation Engine Params: */ #define BLOCK_SIZE 256 #define _M_PI 3.14159265358979323846f #define K_NOT_USED -1 #define _EPSILON 0.00001f #define EPS 0.0000097 #define MS_PER_UPDATE 0.02 // 0.018 #define KSI 0.01 // 0.0093 //0.005/0.54 #define ALPHA 1.2 #define ITER_COUNT 1 #define MAX_ACCEL 20.0f #define MAX_SPEED 10.4f #define V_PREF_ACCEL 1.4f #define KSI_ACCEL 0.54f #define NN_ACCEL 10.0f /* ========================= */ /* Scenario Params: */ #define WIDTH 1280 #define HEIGHT 720 #define ROWS 16 #define COLS 36 #define GROUND_HEIGHT 45.0 #define GRID_UP_BOUND -436.0 #define GRID_LOW_BOND GROUND_HEIGHT + 20 #define LEFT_BOUND_X -285.0 #define RIGHT_BOUND_X 285.0 /* ========================= */ typedef unsigned char BYTE; typedef unsigned int uint; // Kernel definition __global__ void VecAdd(float *A, float *B, float *C) { int i = threadIdx.x; C[i] = A[i] + B[i]; } uint iDivUp(uint a, uint b) { return (a % b != 0) ? 
(a / b + 1) : (a / b); } class particle_tuple { public: int i; int j; particle_tuple(int i, int j) { this->i = i; this->j = j; } }; void save_file(BYTE *pixels, char *file_name, int height, int width) { FILE *imageFile; int x, y; BYTE pixel; imageFile = fopen("image.pgm", "wb"); if (imageFile == NULL) { perror("ERROR: Cannot open output file"); exit(EXIT_FAILURE); } fprintf(imageFile, "P5\n"); // P5 filetype fprintf(imageFile, "%d %d\n", width, height); // dimensions fprintf(imageFile, "255\n"); // Max pixel /* Now write a greyscale ramp */ for (x = 0; x < height; x++) { for (y = 0; y < width; y++) { pixel = pixels[x * height + y]; fputc(pixel, imageFile); } } fclose(imageFile); } // worry about destuctor later std::vector<particle_tuple *> get_tuples(int n) { std::vector<particle_tuple *> tuples; if (n >= 2) { for (int i = 0; i < n; i++) { for (int j = i + 1; j < n; j++) { tuples.push_back(new particle_tuple(i, j)); } } } else { printf("Error: only one particle\n"); } printf("\n"); return tuples; } float min(const float &a, const float &b) { return (a < b) ? a : b; } float norm(const float2 &a) { return sqrtf(a.x * a.x + a.y * a.y); } float distance(const float2 &a, const float2 &b) { return sqrtf((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y)); } float distance_ground(const float2 &a, const float &rad, const float &ground_y) { float res = ground_y - a.y - rad; return res; } float dot(const float2 &a, const float2 &b) { return a.x * b.x + a.y * b.y; } void project_on_vector(const float2 &a, const float2 &b_normalized, float2 &out) { float d = dot(a, b_normalized); out.x = b_normalized.x * d; out.y = b_normalized.y * d; } void clamp(float2 &v, float maxValue) { float lengthV = sqrtf(v.x * v.x + v.y * v.y); if (lengthV > maxValue) { float mult = (maxValue / lengthV); v.x *= mult; v.y *= mult; } } class Wall { public: float2 x0; float2 x1; float2 n; float2 t; float2 t_norm; float a; float b; float c; float ab_sq; float ab_abs; float ac; float bc; float length; float width; Wall(float2 x0, float2 x1, float2 n) { this->x0 = x0; this->x1 = x1; this->n = n; this->t = make_float2(x1.x - x0.x, x1.y - x0.y); this->length = sqrtf(t.x * t.x + t.y * t.y); this->t_norm = make_float2(t.x / length, t.y / length); this->width = 0.05; // TODO fix later this->a = x1.y - x0.y; this->b = x0.x - x1.x; this->c = -(a * x0.x + b * x0.y); this->ab_sq = a * a + b * b; this->ab_abs = sqrtf(a * a + b * b); this->ac = a * c; this->bc = b * c; } }; class Particle { public: float2 X; float2 X_pred; float2 Delta_x; // becomes Delta_buf int Delta_x_ctr; // becomes Delta_buf_ctr float2 V; float2 V_prev; float V_pref; float2 Accel; float mass; float inv_mass; int group; float2 goal; float r; // grid int cell_id; int cell_x; int cell_y; float3 color; Particle(float2 X, float2 V, float mass, float r, int group, float3 color, float2 goal) { this->X = X; this->X_pred = make_float2(X.x, X.y); this->Delta_x = make_float2(0., 0.); this->Delta_x_ctr = 0; this->V = V; this->Accel = make_float2(0., 0.); this->V_prev = make_float2(0., 0.); this->V_pref = V_PREF_ACCEL; this->mass = mass; this->inv_mass = 1.0 / mass; this->group = group; this->goal = goal; this->r = r; // TODO add cell_id, x, y for multiple grids this->cell_id = K_NOT_USED; this->cell_x = K_NOT_USED; this->cell_y = K_NOT_USED; this->color = color; } }; class Simulation; class Grid { public: static const int max_per_cell = 10; int num_particles; int num_cells; int num_rows; int num_cols; float cell_size; float2 min_; float2 max_; int *grid_counters; // stores num 
of particles in each cell int **grid_cells; // stores the particles indicies for each cell // has a maximum number of particles per cell uint num_blocks; uint num_threads; Grid(int num_particles, float dummy_cell_size, float2 min_, float2 max_) { this->num_particles = num_particles; this->cell_size = dummy_cell_size; this->min_ = min_; this->max_ = max_; this->num_cells = (max_.x - min_.x) * (max_.y - min_.y); this->num_cols = (max_.x - min_.x) / cell_size; this->num_rows = (max_.y - min_.y) / cell_size; this->grid_counters = (int *)malloc(num_cells * (sizeof(int))); for (int i = 0; i < num_cells; i++) { this->grid_counters[i] = 0; } this->grid_cells = (int **)malloc(num_cells * (sizeof(int *))); for (int i = 0; i < num_cells; i++) { int *particle_indices = (int *)malloc(max_per_cell * (sizeof(int))); for (int j = 0; j < max_per_cell; j++) { particle_indices[j] = 0; } this->grid_cells[i] = particle_indices; } this->num_threads = min(BLOCK_SIZE, num_particles); this->num_blocks = iDivUp(num_particles, this->num_threads); } void update_stability(Particle **particles) // this is a kernel function { // reset\update grid counters for (int i = 0; i < num_cells; i++) { grid_counters[i] = 0; for (int j = 0; j < max_per_cell; j++) { grid_cells[i][j] = K_NOT_USED; } } // adding particles to grid for (int i = 0; i < num_particles; i++) { float2 X = particles[i]->X; int x = (X.x - min_.x) / cell_size; int y = (X.y - min_.y) / cell_size; int cell_id = y * num_rows + x; particles[i]->cell_id = cell_id; particles[i]->cell_x = x; particles[i]->cell_y = y; int tmp = grid_counters[cell_id]; grid_cells[cell_id][tmp] = i; grid_counters[cell_id] += 1; } } bool is_colliding_stability(Particle **particles, int i, int j) const { float2 X = particles[i]->X; int xi = (X.x - min_.x) / cell_size; int yi = (X.y - min_.y) / cell_size; int cell_id_i = yi * num_rows + xi; X = particles[j]->X; int xj = (X.x - min_.x) / cell_size; int yj = (X.y - min_.y) / cell_size; int cell_id_j = yj * num_rows + xj; int is_x_neighbour = xi - xj; int is_y_neighbour = yi - yj; bool res = is_x_neighbour >= -3 && is_x_neighbour <= 3 && is_y_neighbour >= -3 && is_y_neighbour <= 3; return res; } void update(Particle **particles) // this is a kernel function { // reset\update grid counters for (int i = 0; i < num_cells; i++) { grid_counters[i] = 0; for (int j = 0; j < max_per_cell; j++) { grid_cells[i][j] = K_NOT_USED; } } // adding particles to grid for (int i = 0; i < num_particles; i++) { float2 X = particles[i]->X_pred; int x = (X.x - min_.x) / cell_size; int y = (X.y - min_.y) / cell_size; int cell_id = y * num_rows + x; particles[i]->cell_id = cell_id; particles[i]->cell_x = x; particles[i]->cell_y = y; int tmp = grid_counters[cell_id]; grid_cells[cell_id][tmp] = i; grid_counters[cell_id] += 1; } } /*two options: 1) is colliding should be a 2d matrix that we preprocess in the update step then, is colliding just returns true\false based on that matrix 2) have each particle loop around the surronding cells to see if they are colliding */ bool is_colliding(Particle **particles, int i, int j) const { float2 X = particles[i]->X_pred; int xi = (X.x - min_.x) / cell_size; int yi = (X.y - min_.y) / cell_size; int cell_id_i = yi * num_rows + xi; X = particles[j]->X_pred; int xj = (X.x - min_.x) / cell_size; int yj = (X.y - min_.y) / cell_size; int cell_id_j = yj * num_rows + xj; int is_x_neighbour = xi - xj; int is_y_neighbour = yi - yj; bool res = is_x_neighbour >= -3 && is_x_neighbour <= 3 && is_y_neighbour >= -3 && is_y_neighbour <= 3; return 
res; } ~Grid() { free(grid_counters); for (int i = 0; i < num_cells; i++) { free(grid_cells[i]); } free(grid_cells); } }; class Constraint { // int i1,i2,... praticle indices public: const Simulation *sim; int *indicies; int num_particles; float2 *delta_X; bool active; Constraint(Simulation *sim, int num_particles) { this->sim = sim; this->num_particles = num_particles; this->delta_X = (float2 *)malloc(num_particles * sizeof(float2)); this->indicies = (int *)malloc(num_particles * (sizeof(int))); this->active = false; for (int i = 0; i < num_particles; i++) { delta_X[i] = make_float2(0., 0.); } } virtual void project(Particle **particles) = 0; // forcing implemntation in base class virtual ~Constraint() { free(indicies); free(delta_X); } }; // should be constructed once for each scenerio class PathPlanner { public: int num_particles; Particle **particles; float2 *goals; float2 *velocity_buffer; PathPlanner(int num_particles, Particle **particles) { this->num_particles = num_particles; this->particles = particles; this->velocity_buffer = (float2 *)malloc(sizeof(float2) * num_particles); this->goals = (float2 *)malloc(sizeof(float2) * num_particles); for (int i = 0; i < num_particles; i++) { this->velocity_buffer[i] = make_float2(0., 0.); // this->goals[i]=make_float2(particles[i]->goal.x,particles[i]->goal.y); } } // TODO get current velocity, adjust predicted particle accordinfly for // smoothness void calc_pref_v_force(const int &particle_id) // returns velocity { const Particle *p = this->particles[particle_id]; float2 goal = p->goal; this->velocity_buffer[particle_id].x = goal.x - p->X.x; this->velocity_buffer[particle_id].y = goal.y - p->X.y; const float length = sqrtf(velocity_buffer[particle_id].x * velocity_buffer[particle_id].x + velocity_buffer[particle_id].y * velocity_buffer[particle_id].y); if (length != 0) { this->velocity_buffer[particle_id].x /= length; this->velocity_buffer[particle_id].y /= length; this->velocity_buffer[particle_id].x *= p->V_pref; this->velocity_buffer[particle_id].y *= p->V_pref; } } void calc_velocity(const int &particle_id) // returns velocity { const Particle *p = this->particles[particle_id]; // const float2 goal=p->goal; float2 goal = p->goal; // goal.x=p->X.x; // goal.y=GROUND_HEIGHT; this->velocity_buffer[particle_id].x = goal.x - p->X.x; this->velocity_buffer[particle_id].y = goal.y - p->X.y; const float length = sqrtf(velocity_buffer[particle_id].x * velocity_buffer[particle_id].x + velocity_buffer[particle_id].y * velocity_buffer[particle_id].y); if (length != 0) { this->velocity_buffer[particle_id].x /= length; this->velocity_buffer[particle_id].y /= length; this->velocity_buffer[particle_id].x *= p->V_pref; this->velocity_buffer[particle_id].y *= p->V_pref; // part below needs to be removed // add clamping here! 
/* this->velocity_buffer[particle_id].x=(1.0-KSI)*particles[particle_id]->V.x +KSI*velocity_buffer[particle_id].x; this->velocity_buffer[particle_id].y=(1.0-KSI)*particles[particle_id]->V.y +KSI*velocity_buffer[particle_id].y; */ // clamping v between iterations /* float max_dv_mag = 0.08; float dv_x=this->velocity_buffer[particle_id].x-particles[particle_id]->V_prev.x; float dv_y=this->velocity_buffer[particle_id].y-particles[particle_id]->V_prev.y; float dv_mag=sqrt(dv_x*dv_x+dv_y*dv_y); if(dv_mag>max_dv_mag) { float mult = (max_dv_mag/dv_mag); this->velocity_buffer[particle_id].x*=mult; this->velocity_buffer[particle_id].y*=mult; //printf("%.3f %.3f\n",dv_mag,mult); } */ } } ~PathPlanner() { free(velocity_buffer); free(goals); } }; class Simulation { public: int num_particles; int num_constraints; float time_step; Constraint **constraints; Particle **particles; PathPlanner *planner; Grid *grid; Grid *stability_grid; FILE *out; std::unordered_map<unsigned long long, Constraint *> collision_map; Constraint **collision_upper_trig_arr; Constraint **powerlaw_upper_trig_arr; Constraint **stability_upper_trig_arr; int step_no; float friction_constraint_stiffness; int num_walls; Wall **walls; Simulation(int num_particles, int num_constraints, float time_step, char *out_path) { this->num_particles = num_particles; this->time_step = time_step; this->particles = (Particle **)malloc(sizeof(void *) * num_particles); this->planner = new PathPlanner(num_particles, this->particles); this->out = fopen(out_path, "w"); this->num_constraints = 0; this->constraints = NULL; this->collision_map = std::unordered_map<unsigned long long, Constraint *>(); this->collision_upper_trig_arr = NULL; this->powerlaw_upper_trig_arr = NULL; this->stability_upper_trig_arr = NULL; this->grid = new Grid(num_particles, 2.66, make_float2(LEFT_BOUND_X - 50, GRID_UP_BOUND - 10), make_float2(RIGHT_BOUND_X + 50, GRID_LOW_BOND)); /* this->stability_grid=new Grid(num_particles,2.16, //5.66, //7.66, make_float2(LEFT_BOUND_X-50,GRID_UP_BOUND-10), make_float2(RIGHT_BOUND_X+50,GRID_LOW_BOND)); */ this->stability_grid = new Grid(num_particles, 2.2, // 5.66, //7.66, make_float2(LEFT_BOUND_X - 50, GRID_UP_BOUND - 10), make_float2(RIGHT_BOUND_X + 50, GRID_LOW_BOND)); this->step_no = 1; this->friction_constraint_stiffness = 0.22f; this->num_walls = 0; this->walls = NULL; } void calc_constraint_stiffness(int n) { // 1.-(1.-0.25)**(4./6) friction_constraint_stiffness = 1.0f - powf(1.0f - friction_constraint_stiffness, (1.0f / n)); } void stabilization() { stability_grid->update_stability(particles); for (int i = 0; i < 1; i++) { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } // friction constraints for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); stability_upper_trig_arr[t_idx]->project(particles); } } } } } } } } // traverse friction constraints to 
accumalte deltas for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (stability_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < stability_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = stability_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += stability_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += stability_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } stability_upper_trig_arr[t_idx]->active = false; } } } } } } } } } for (int i = 0; i < num_particles; i++) { if (particles[i]->Delta_x_ctr > 0) { float dx = ALPHA * particles[i]->Delta_x.x / particles[i]->Delta_x_ctr; float dy = ALPHA * particles[i]->Delta_x.y / particles[i]->Delta_x_ctr; particles[i]->X_pred.x += dx; particles[i]->X_pred.y += dy; particles[i]->X.x += dx; particles[i]->X.y += dy; } } } } void project_velocity_constraints() { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -2; x <= 2; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -2; y <= 2; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); powerlaw_upper_trig_arr[t_idx]->project(particles); } } } } } } } } // traverse friction constraints to accumalte deltas for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -2; x <= 2; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -2; y <= 2; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (powerlaw_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < powerlaw_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = powerlaw_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } powerlaw_upper_trig_arr[t_idx]->active = false; } } } } } } } } } for (int i = 0; i < num_particles; i++) { particles[i]->V.x *= 0.99; particles[i]->V.y *= 0.99; float k = 1.; // 0.05; 
//stiffness changes with iteration; float2 dv_pref = particles[i]->V_prev; dv_pref.x = k * (planner->velocity_buffer[i].x - particles[i]->V.x); dv_pref.y = k * (planner->velocity_buffer[i].y - particles[i]->V.y); clamp(dv_pref, time_step * MAX_ACCEL); if (particles[i]->Delta_x_ctr > 0) { float dvx = (dv_pref.x + particles[i]->Delta_x.x) / (1. + particles[i]->Delta_x_ctr); float dvy = (dv_pref.y + particles[i]->Delta_x.y) / (1. + particles[i]->Delta_x_ctr); particles[i]->V.x += dvx; particles[i]->V.y += dvy; } else { particles[i]->V.x += dv_pref.x; particles[i]->V.y += dv_pref.y; } // clamp(particles[i]->V,MAX_SPEED); particles[i]->X_pred.x = particles[i]->X.x + particles[i]->V.x * time_step; particles[i]->X_pred.y = particles[i]->X.y + particles[i]->V.y * time_step; // perhaps clamp cannot be here, but rather in the constraints themselves // so to force that each constraint cannot become // TODO also need to clamp maximum speed change clamp } } void project_constraints() { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } // friction constraints for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { // collision_map[i * num_particles + j]->project(particles); int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); collision_upper_trig_arr[t_idx]->project(particles); // powerlaw_upper_trig_arr[t_idx]->project(particles); // stability_upper_trig_arr[t_idx]->project(particles); } } } } } } } } // ground constraints for (int i = 0; i < num_constraints; i++) { constraints[i]->project(particles); } // traverse friction constraints to accumalte deltas for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (collision_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < collision_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = collision_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += collision_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += collision_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } collision_upper_trig_arr[t_idx]->active = false; } /* if(powerlaw_upper_trig_arr[t_idx]->active) { for(int ctr=0; ctr<powerlaw_upper_trig_arr[t_idx]->num_particles ;ctr++) { int p_idx=powerlaw_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].y; 
particles[p_idx]->Delta_x_ctr++; } powerlaw_upper_trig_arr[t_idx]->active=false; } */ } } } } } } } } /* for(std::unordered_map<unsigned long long, Constraint*>::const_iterator it = collision_map.cbegin(); it != collision_map.cend(); ++it) { if(it->second->active) { //printf("Super Happy active constraint :-)\n"); for(int j=0;j<it->second->num_particles;j++) { int idx=it->second->indicies[j]; particles[idx]->Delta_x.x += it->second->delta_X[j].x; particles[idx]->Delta_x.y += it->second->delta_X[j].y; particles[idx]->Delta_x_ctr++; } } } */ // traverse ground and wall constraints to accumalte deltas for (int i = 0; i < num_constraints; i++) { if (constraints[i]->active) { for (int j = 0; j < constraints[i]->num_particles; j++) { int idx = constraints[i]->indicies[j]; particles[idx]->Delta_x.x += constraints[i]->delta_X[j].x; particles[idx]->Delta_x.y += constraints[i]->delta_X[j].y; particles[idx]->Delta_x_ctr++; } constraints[i]->active = false; } } for (int i = 0; i < num_particles; i++) { if (particles[i]->Delta_x_ctr > 0) { particles[i]->X_pred.x += ALPHA * particles[i]->Delta_x.x / particles[i]->Delta_x_ctr; particles[i]->X_pred.y += ALPHA * particles[i]->Delta_x.y / particles[i]->Delta_x_ctr; // clamp if (false) { float maxValue = 0.069; float length_d_i = distance(particles[i]->X_pred, particles[i]->X); if (length_d_i > maxValue) { float mult = (maxValue / length_d_i); particles[i]->X_pred.x = particles[i]->X.x + (particles[i]->X_pred.x - particles[i]->X.x) * mult; particles[i]->X_pred.y = particles[i]->X.y + (particles[i]->X_pred.y - particles[i]->X.y) * mult; } /* float max_dv_mag = 0.0013; float curr_v_x=((particles[i]->X_pred.x-particles[i]->X.x)/time_step); float curr_v_y=((particles[i]->X_pred.y-particles[i]->X.y)/time_step); float dv_x=curr_v_x-particles[i]->V_prev.x; float dv_y=curr_v_y-particles[i]->V_prev.y; float dv_mag=sqrt(dv_x*dv_x+dv_y*dv_y); if(dv_mag>max_dv_mag) { float mult = (max_dv_mag/dv_mag); particles[i]->X_pred.x=particles[i]->X.x+curr_v_x*mult; particles[i]->X_pred.y=particles[i]->X.y+curr_v_y*mult; //printf("%.3f %.3f\n",dv_mag,mult); } */ } } } } void do_time_step_force() { printf("Force Solve Frame %d\n", step_no); for (int i = 0; i < num_particles; i++) { planner->calc_pref_v_force(i); particles[i]->V_prev.x = planner->velocity_buffer[i].x; particles[i]->V_prev.y = planner->velocity_buffer[i].y; } // TODO change grid cell size stability_grid->update_stability(particles); // update grid by current positions // TODO calculate preffered speed //_vPref *= _prefSpeed/sqrtf(distSqToGoal); for (int i = 0; i < 1; i++) { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -3; x <= 3; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -3; y <= 3; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); // printf("Will project %d %d\n",i,j); stability_upper_trig_arr[t_idx]->project(particles); } } } } } } } } for (int i = 0; i < num_particles; i++) { // 
iterate over adjacent cells for (int x = -3; x <= 3; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -3; y <= 3; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (stability_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < stability_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = stability_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += stability_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += stability_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } stability_upper_trig_arr[t_idx]->active = false; } } } } } } } } } for (int i = 0; i < num_particles; i++) { particles[i]->Accel.x = (particles[i]->V_prev.x - particles[i]->V.x) / KSI_ACCEL; particles[i]->Accel.y = (particles[i]->V_prev.y - particles[i]->V.y) / KSI_ACCEL; if (particles[i]->Delta_x_ctr > 0) { /* define force constraint checks distance, tao. if distance < Nearest Neighbour calculate Delta_force for each particle */ // accumalte and average delta forces for each particle // apply acceleration clamp // update velocity and positions // printf("In particles step %d\n",i); particles[i]->Accel.x += particles[i]->Delta_x.x / particles[i]->Delta_x_ctr; particles[i]->Accel.y += particles[i]->Delta_x.y / particles[i]->Delta_x_ctr; clamp(particles[i]->Accel, MAX_ACCEL); } particles[i]->V.x += particles[i]->Accel.x * time_step; particles[i]->V.y += particles[i]->Accel.y * time_step; printf("%d Speed %.4f\n", i, sqrtf(particles[i]->V.x * particles[i]->V.x + particles[i]->V.y * particles[i]->V.y)); particles[i]->X.x += particles[i]->V.x * time_step; particles[i]->X.y += particles[i]->V.y * time_step; } } step_no++; } void do_time_step() { printf("PBD Solve Frame %d\n", step_no); for (int i = 0; i < num_particles; i++) { planner->calc_velocity(i); // particles[i]->V.x=planner->velocity_buffer[i].x; // particles[i]->V.y=planner->velocity_buffer[i].y; particles[i]->V_prev.x = particles[i]->V.x; particles[i]->V_prev.y = particles[i]->V.y; // For sand simulation: // particles[i]->V.y+=time_step*9.81; //times mass TODO? 
// particles[i]->V.x*=0.99; // particles[i]->V.y*=0.99; // printf("particle %d // speed=(%.2f,%.2f)\n",i,particles[i]->V.x,particles[i]->V.y); } for (int i = 0; i < num_particles; i++) { particles[i]->X_pred.x += time_step * particles[i]->V.x; particles[i]->X_pred.y += time_step * particles[i]->V.y; } //----------------------stability grid stuff // stabilization(); //-----------------------project constraints grid->update(particles); project_velocity_constraints(); for (int i = 1; i < (ITER_COUNT + 1); i++) { calc_constraint_stiffness(i); project_constraints(); } for (int i = 0; i < num_particles; i++) { float dx = particles[i]->X_pred.x - particles[i]->X.x; float dy = particles[i]->X_pred.y - particles[i]->X.y; particles[i]->V.x = dx / time_step; particles[i]->V.y = dy / time_step; particles[i]->X.x = particles[i]->X_pred.x; particles[i]->X.y = particles[i]->X_pred.y; } step_no++; } ~Simulation() { fclose(this->out); for (int i = 0; i < num_particles; i++) { delete particles[i]; } for (int i = 0; i < num_constraints; i++) { delete constraints[i]; } /* for (std::unordered_map<unsigned long long, Constraint*>::const_iterator it = collision_map.begin(); it != collision_map.end(); ++it) { delete it->second; } */ if (walls != NULL) { for (int i = 0; i < num_walls; i++) { delete walls[i]; } } int trig_len = 1 + (num_particles * (num_particles + 1) / 2); for (int i = 0; i < num_particles; i++) { for (int j = 0; j < num_particles; j++) { if (i < j) { int r = i; int c = j; int t_idx = (num_particles * r) + c - (r * (r + 1) * 0.5); if (collision_upper_trig_arr != NULL) { delete collision_upper_trig_arr[t_idx]; } if (powerlaw_upper_trig_arr != NULL) { delete powerlaw_upper_trig_arr[t_idx]; } if (stability_upper_trig_arr != NULL) { delete stability_upper_trig_arr[t_idx]; } } } } free(constraints); free(particles); free(collision_upper_trig_arr); free(powerlaw_upper_trig_arr); delete planner; delete grid; } }; class Stability_Constraint : public Constraint { public: int i; int j; float w_i_coef; float w_j_coef; float2 contact_normal; float2 tangential_displacement; float2 x_pred_w_delta1; float2 x_pred_w_delta2; float2 out; float collision_margin; Stability_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->w_j_coef = -sim->particles[j]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->contact_normal = make_float2(0.0, 0.0); this->tangential_displacement = make_float2(0.0, 0.0); this->x_pred_w_delta1 = make_float2(0., 0.); this->x_pred_w_delta2 = make_float2(0., 0.); this->out = make_float2(0., 0.); } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; if (true) { float d = distance(particles[i]->X, particles[j]->X); float f = d - collision_margin; if (f < 0) { contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (particles[i]->X.x - particles[j]->X.x) / d; contact_normal.y = (particles[i]->X.y - 
particles[j]->X.y) / d; delta_X[0].x = -w_i_coef * contact_normal.x * f; delta_X[0].y = -w_i_coef * contact_normal.y * f; delta_X[1].x = -w_j_coef * contact_normal.x * f; delta_X[1].y = -w_j_coef * contact_normal.y * f; active = true; } } } }; class Powerlaw_Force_Constraint : public Constraint { public: static const float k = 1.5f; // stiffness static const float m = 2.0f; static const float tao0 = 3.f; int i; int j; float w_i_coef; float w_j_coef; float2 out; float collision_margin; float radius_init; float radius_sq_init; float delta_t; float dv_i; float dv_j; Powerlaw_Force_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; this->w_i_coef = sim->particles[i]->inv_mass; this->w_j_coef = sim->particles[j]->inv_mass; this->out = make_float2(0., 0.); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->radius_init = (sim->particles[i]->r + sim->particles[j]->r); this->radius_sq_init = radius_init * radius_init; this->delta_t = sim->time_step; this->dv_i = 1. / delta_t; this->dv_j = -1. / delta_t; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; float2 x_i = particles[i]->X; float2 x_j = particles[j]->X; const float dist = distance(particles[i]->X, particles[j]->X); float radius_sq = radius_sq_init; if (dist < radius_init) { radius_sq = (radius_init - dist) * (radius_init - dist); } /* float v_x=(particles[i]->X_pred.x-x_i.x)/delta_t -(particles[j]->X_pred.x-x_j.x)/delta_t; float v_y=(particles[i]->X_pred.y-x_i.y)/delta_t -(particles[j]->X_pred.y-x_j.y)/delta_t; */ float v_x = particles[i]->V.x - particles[j]->V.x; float v_y = particles[i]->V.y - particles[j]->V.y; float x0 = x_i.x - x_j.x; float y0 = x_i.y - x_j.y; float v_sq = v_x * v_x + v_y * v_y; float x0_sq = x0 * x0; float y0_sq = y0 * y0; float x_sq = x0_sq + y0_sq; float a = v_sq; float b = -v_x * x0 - v_y * y0; float b_sq = b * b; float c = x_sq - radius_sq; float d_sq = b_sq - a * c; if (d_sq > 0 && (a < -_EPSILON || a > _EPSILON)) { float d = sqrtf(d_sq); float tao = (b - d) / a; if (dist < NN_ACCEL && tao > 0) { float c_x_nom = (v_sq * x0 + b * v_x) / d; float c_x = v_x - c_x_nom; float c_y_nom = (v_sq * y0 + b * v_y) / d; float c_y = v_y - c_y_nom; float F_s = -k * exp(-tao / tao0) / (a * powf(tao, m)) * (m / tao + 1. 
/ tao0); float F_x = c_x * F_s; float F_y = c_y * F_s; delta_X[0].x = F_x; delta_X[0].y = F_y; delta_X[1].x = -F_x; delta_X[1].y = -F_y; active = true; } } } }; class Powerlaw_Constraint : public Constraint { public: static const float k = 1.5; // stiffness static const float tao0 = 4.; static const float maxValue = 0.2; // delta_t * pref_speed int i; int j; float w_i_coef; float w_j_coef; float2 out; float collision_margin; float radius_init; float radius_sq_init; float delta_t; float dv_i; float dv_j; float max_acceleration; Powerlaw_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass; this->w_j_coef = sim->particles[j]->inv_mass; this->out = make_float2(0., 0.); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->radius_init = (sim->particles[i]->r + sim->particles[j]->r); this->radius_sq_init = radius_init * radius_init; this->delta_t = sim->time_step; this->dv_i = 1.; // 1./delta_t; this->dv_j = -1.; //-1./delta_t; this->max_acceleration = sim->time_step * MAX_ACCEL; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; float2 x_i = particles[i]->X; float2 x_j = particles[j]->X; const float dist = distance(particles[i]->X, particles[j]->X); float radius_sq = radius_sq_init; if (dist < radius_init) { radius_sq = (radius_init - dist) * (radius_init - dist); } float v_x = (particles[i]->X_pred.x - x_i.x) / delta_t - (particles[j]->X_pred.x - x_j.x) / delta_t; float v_y = (particles[i]->X_pred.y - x_i.y) / delta_t - (particles[j]->X_pred.y - x_j.y) / delta_t; float x0 = x_i.x - x_j.x; float y0 = x_i.y - x_j.y; float v_sq = v_x * v_x + v_y * v_y; float x0_sq = x0 * x0; float y0_sq = y0 * y0; float x_sq = x0_sq + y0_sq; float a = v_sq; float b = -v_x * x0 - v_y * y0; float b_sq = b * b; float c = x_sq - radius_sq; float d_sq = b_sq - a * c; if (false && d_sq > 0 && (a < -_EPSILON || a > _EPSILON)) { float d = sqrtf(d_sq); float tao = (b - d) / a; float tao_alt = (b + d) / a; // pick the min solution that is > 0 tao = tao_alt < tao && tao_alt > 0 ? tao_alt : tao; // need to consider +- sign perhaps? if (tao > 0 /* && tao<tao0 */) { float clamp_tao = exp(-tao * tao / tao0); float c_tao = abs(tao - tao0); float tao_sq = c_tao * c_tao; float grad_x_i = 2 * c_tao * ((dv_i / a) * ((-2. * v_x * tao) - (x0 + (v_y * x0 * y0 + v_x * (radius_sq - y0_sq)) / d))); float grad_y_i = 2 * c_tao * ((dv_i / a) * ((-2. 
* v_y * tao) - (y0 + (v_x * x0 * y0 + v_y * (radius_sq - x0_sq)) / d))); float grad_x_j = -grad_x_i; float grad_y_j = -grad_y_i; float stiff = exp(-tao * tao / tao0); float s = 0.5 * tao_sq / (particles[i]->inv_mass * (grad_y_i * grad_y_i + grad_x_i * grad_x_i) + particles[j]->inv_mass * (grad_y_j * grad_y_j + grad_x_j * grad_x_j)); active = true; delta_X[0].x = s * w_i_coef * grad_x_i; delta_X[0].y = s * w_i_coef * grad_y_i; clamp(delta_X[0], max_acceleration); delta_X[1].x = s * w_j_coef * grad_x_j; delta_X[1].y = s * w_j_coef * grad_y_j; clamp(delta_X[1], max_acceleration); } if (false && tao > 0) { float clamp_tao = exp(-tao * tao / tao0); float c_x_nom = (v_sq * x0 + b * v_x) / d; float c_x = v_x - c_x_nom; float c_y_nom = (v_sq * y0 + b * v_y) / d; float c_y = v_y - c_y_nom; float grad_x_i = dv_i + (v_y * y0 * dv_i / d) + ((v_y * x0 * y0 + (radius_sq - y0_sq) * v_x) * dv_i * c_x_nom / d_sq); float grad_y_i = dv_i + (v_x * x0 * dv_i / d) + ((v_x * x0 * y0 + (radius_sq - x0_sq) * v_y) * dv_i * c_y_nom / d_sq); float grad_x_j = dv_j + (v_y * y0 * dv_j / d) + ((v_y * x0 * y0 + (radius_sq - y0_sq) * v_x) * dv_j * c_x_nom / d_sq); float grad_y_j = dv_j + (v_x * x0 * dv_j / d) + ((v_x * x0 * y0 + (radius_sq - x0_sq) * v_y) * dv_j * c_y_nom / d_sq); grad_x_i *= 2 * abs(c_x); grad_y_i *= 2 * abs(c_y); grad_x_j *= 2 * abs(c_x); grad_y_j *= 2 * abs(c_y); c_x *= c_x; c_y *= c_y; float s_fin = c_x + c_y; float stiff = ((2. / tao) + (1. / tao0)) * k * exp(-tao / tao0) / (v_sq * tao * tao); // float s_x=stiff*c_x/(particles[i]->inv_mass*grad_x_i*grad_x_i // +particles[j]->inv_mass*grad_x_j*grad_x_j); // float s_y=stiff*c_y/(particles[i]->inv_mass*grad_y_i*grad_y_i // +particles[j]->inv_mass*grad_y_j*grad_y_j); // s_x*=s_x; // s_y*=s_y; stiff = exp(-tao * tao / tao0); float s = 0.39 * s_fin / (particles[i]->inv_mass * (grad_y_i * grad_y_i + grad_x_i * grad_x_i) + particles[j]->inv_mass * (grad_y_j * grad_y_j + grad_x_j * grad_x_j)); // grad_y_i=%f\n",stiff,s,grad_x_i,grad_y_i); active = true; delta_X[0].x = s * w_i_coef * grad_x_i; delta_X[0].y = s * w_i_coef * grad_y_i; clamp(delta_X[0], max_acceleration); delta_X[1].x = s * w_j_coef * grad_x_j; delta_X[1].y = s * w_j_coef * grad_y_j; clamp(delta_X[1], max_acceleration); } } } }; class Friction_Constraint2 : public Constraint { public: // usually easier to keep object moving than start movement, so mu_s>mu_k // some friction results // http://hypertextbook.com/facts/2007/TabraizRasul.shtml static const float mui_static = 0.0f; // 0.00023; //0.021; static const float mui_kinematic = 0.0f; // 0.00017; //0.02; /*typical values: http://spiff.rit.edu/classes/phys211/lectures/fric/fric_all.html smooth 0.05 medium 0.3 rough 1.0 */ int i; int j; float w_i_coef; float w_j_coef; float2 contact_normal; float2 tangential_displacement; float2 x_pred_w_delta1; float2 x_pred_w_delta2; float2 out; float collision_margin; Friction_Constraint2(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->w_j_coef = -sim->particles[j]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->contact_normal = make_float2(0.0, 0.0); this->tangential_displacement = make_float2(0.0, 0.0); 
this->x_pred_w_delta1 = make_float2(0., 0.); this->x_pred_w_delta2 = make_float2(0., 0.); this->out = make_float2(0., 0.); } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; if (true) { float d = distance(particles[i]->X_pred, particles[j]->X_pred); float f = d - collision_margin; if (f < 0) { contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (particles[i]->X_pred.x - particles[j]->X_pred.x) / d; contact_normal.y = (particles[i]->X_pred.y - particles[j]->X_pred.y) / d; delta_X[0].x = -w_i_coef * contact_normal.x * f; delta_X[0].y = -w_i_coef * contact_normal.y * f; delta_X[1].x = -w_j_coef * contact_normal.x * f; delta_X[1].y = -w_j_coef * contact_normal.y * f; x_pred_w_delta1.x = delta_X[0].x + particles[i]->X_pred.x; x_pred_w_delta1.y = delta_X[0].y + particles[i]->X_pred.y; x_pred_w_delta2.x = delta_X[1].x + particles[j]->X_pred.x; x_pred_w_delta2.y = delta_X[1].y + particles[j]->X_pred.y; float n_norm = distance(x_pred_w_delta1, x_pred_w_delta2); contact_normal.y = (x_pred_w_delta1.x - x_pred_w_delta2.x) / n_norm; contact_normal.x = -(x_pred_w_delta1.y - x_pred_w_delta2.y) / n_norm; // tangential_displacement.x = x_pred_w_delta1.x-x_pred_w_delta2.x; // tangential_displacement.y = x_pred_w_delta1.y-x_pred_w_delta2.y; // Above might be wrong // should be tangential_displacement.x = x_pred_w_delta1.x - particles[i]->X.x - (x_pred_w_delta2.x - particles[j]->X.x); tangential_displacement.y = x_pred_w_delta1.y - particles[i]->X.y - (x_pred_w_delta2.y - particles[j]->X.y); project_on_vector(tangential_displacement, contact_normal, out); float out_norm = norm(out); if (out_norm >= mui_static * d) { float coef = min(1., mui_kinematic * d / out_norm); out.x *= coef; out.y *= coef; } delta_X[0].x += -out.x * w_i_coef; delta_X[0].y += -out.y * w_i_coef; delta_X[1].x += -out.x * w_j_coef; delta_X[1].y += -out.y * w_j_coef; active = true; } } } }; class Friction_Constraint : public Constraint { public: // usually easier to keep object moving than start movement, so mu_s>mu_k // some friction results // http://hypertextbook.com/facts/2007/TabraizRasul.shtml static const float mui_static = 0.00026; // 0.021; static const float mui_kinematic = 0.00023; // 0.02; /*typical values: http://spiff.rit.edu/classes/phys211/lectures/fric/fric_all.html smooth 0.05 medium 0.3 rough 1.0 */ int i; int j; float w_i_coef; float w_j_coef; float2 contact_normal; float2 tangential_displacement; float2 x_pred_w_delta1; float2 x_pred_w_delta2; float2 out; float collision_margin; float radius_sq_init; float radius_init; float delta_t; Friction_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->w_j_coef = -sim->particles[j]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->contact_normal = make_float2(0.0, 0.0); this->tangential_displacement = make_float2(0.0, 0.0); this->x_pred_w_delta1 = make_float2(0., 0.); 
this->x_pred_w_delta2 = make_float2(0., 0.); this->out = make_float2(0., 0.); this->radius_init = (sim->particles[i]->r + sim->particles[j]->r); this->radius_sq_init = radius_init * radius_init; this->delta_t = sim->time_step; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; float d = distance(particles[i]->X_pred, particles[j]->X_pred); float f = d - collision_margin; if (f < 0) { contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (particles[i]->X_pred.x - particles[j]->X_pred.x) / d; contact_normal.y = (particles[i]->X_pred.y - particles[j]->X_pred.y) / d; delta_X[0].x = -w_i_coef * contact_normal.x * f; delta_X[0].y = -w_i_coef * contact_normal.y * f; delta_X[1].x = -w_j_coef * contact_normal.x * f; delta_X[1].y = -w_j_coef * contact_normal.y * f; x_pred_w_delta1.x = delta_X[0].x + particles[i]->X_pred.x; x_pred_w_delta1.y = delta_X[0].y + particles[i]->X_pred.y; x_pred_w_delta2.x = delta_X[1].x + particles[j]->X_pred.x; x_pred_w_delta2.y = delta_X[1].y + particles[j]->X_pred.y; float n_norm = distance(x_pred_w_delta1, x_pred_w_delta2); contact_normal.y = (x_pred_w_delta1.x - x_pred_w_delta2.x) / n_norm; contact_normal.x = -(x_pred_w_delta1.y - x_pred_w_delta2.y) / n_norm; // tangential_displacement.x = x_pred_w_delta1.x-x_pred_w_delta2.x; // tangential_displacement.y = x_pred_w_delta1.y-x_pred_w_delta2.y; // Above might be wrong // should be tangential_displacement.x = x_pred_w_delta1.x - particles[i]->X.x - (x_pred_w_delta2.x - particles[j]->X.x); tangential_displacement.y = x_pred_w_delta1.y - particles[i]->X.y - (x_pred_w_delta2.y - particles[j]->X.y); project_on_vector(tangential_displacement, contact_normal, out); float out_norm = norm(out); if (out_norm >= mui_static * d) { float coef = min(1., mui_kinematic * d / out_norm); out.x *= coef; out.y *= coef; } delta_X[0].x += -out.x * w_i_coef; delta_X[0].y += -out.y * w_i_coef; delta_X[1].x += -out.x * w_j_coef; delta_X[1].y += -out.y * w_j_coef; active = true; } else { float2 x_i = particles[i]->X; float2 x_j = particles[j]->X; const float dist = distance(particles[i]->X, particles[j]->X); float radius_sq = radius_sq_init; if (dist < radius_init) { radius_sq = (radius_init - dist) * (radius_init - dist); } const float v_ix = (particles[i]->X_pred.x - x_i.x) / delta_t; const float v_jx = (particles[j]->X_pred.x - x_j.x) / delta_t; const float v_x = v_ix - v_jx; const float v_iy = (particles[i]->X_pred.y - x_i.y) / delta_t; const float v_jy = (particles[j]->X_pred.y - x_j.y) / delta_t; const float v_y = v_iy - v_jy; float x0 = x_i.x - x_j.x; float y0 = x_i.y - x_j.y; float v_sq = v_x * v_x + v_y * v_y; float x0_sq = x0 * x0; float y0_sq = y0 * y0; float x_sq = x0_sq + y0_sq; float a = v_sq; float b = -v_x * x0 - v_y * y0; float b_sq = b * b; float c = x_sq - radius_sq; float d_sq = b_sq - a * c; if (d_sq > 0 && (a < -_EPSILON || a > _EPSILON)) { float d = sqrtf(d_sq); float tao = (b - d) / a; float tao_alt = (b + d) / a; // pick the min solution that is > 0 tao = tao_alt < tao && tao_alt > 0 ? tao_alt : tao; // need to consider +- sign perhaps? 
if (tao > 0) { // const float min_tao_init=b/v_sq; const float min_tao = tao + delta_t; // min_tao_init;//(min_tao_init+tao)/2; const float x_i_min = x_i.x + min_tao * v_ix; const float y_i_min = x_i.y + min_tao * v_iy; const float x_j_min = x_j.x + min_tao * v_jx; const float y_j_min = x_j.y + min_tao * v_jy; float min_tao_dist = sqrtf((x_i_min - x_j_min) * (x_i_min - x_j_min) + (y_i_min - y_j_min) * (y_i_min - y_j_min)); float d = min_tao_dist; float f = d - collision_margin; if (f < 0 && d > _EPSILON) { const float clamp_tao = exp(-min_tao * min_tao / 5.); const float k = sim->friction_constraint_stiffness; // 0.25; contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (x_i_min - x_j_min) / d; contact_normal.y = (y_i_min - y_j_min) / d; delta_X[0].x = -k * clamp_tao * w_i_coef * contact_normal.x * f; delta_X[0].y = -k * clamp_tao * w_i_coef * contact_normal.y * f; delta_X[1].x = -k * clamp_tao * w_j_coef * contact_normal.x * f; delta_X[1].y = -k * clamp_tao * w_j_coef * contact_normal.y * f; active = true; const float x_i_tao = x_i.x + tao * v_ix; const float y_i_tao = x_i.y + tao * v_iy; const float x_j_tao = x_j.x + tao * v_jx; const float y_j_tao = x_j.y + tao * v_jy; x_pred_w_delta1.x = delta_X[0].x + x_i_min; x_pred_w_delta1.y = delta_X[0].y + y_i_min; x_pred_w_delta2.x = delta_X[1].x + x_j_min; x_pred_w_delta2.y = delta_X[1].y + y_j_min; float n_norm = distance(x_pred_w_delta1, x_pred_w_delta2); contact_normal.y = (x_pred_w_delta1.x - x_pred_w_delta2.x) / n_norm; contact_normal.x = -(x_pred_w_delta1.y - x_pred_w_delta2.y) / n_norm; tangential_displacement.x = x_pred_w_delta1.x - x_i_tao - (x_pred_w_delta2.x - x_j_tao); tangential_displacement.y = x_pred_w_delta1.y - y_i_tao - (x_pred_w_delta2.y - y_j_tao); project_on_vector(tangential_displacement, contact_normal, out); float out_norm = norm(out); if (out_norm >= mui_static * d) { float coef = min(1., mui_kinematic * d / out_norm); out.x *= coef; out.y *= coef; } delta_X[0].x += -out.x * w_i_coef; delta_X[0].y += -out.y * w_i_coef; delta_X[1].x += -out.x * w_j_coef; delta_X[1].y += -out.y * w_j_coef; active = true; } } } } } }; class Wall_Constraint : public Constraint { public: // static const float ground_height=GROUND_HEIGHT; int i; // F=mui Fn = mui mg . 
// static const float // kinematic_friction=30.7*9.81*MS_PER_UPDATE*MS_PER_UPDATE; static const float kinematic_friction = 30000.7 * 9.81 * MS_PER_UPDATE * MS_PER_UPDATE; int wall_idx; float collision_margin; float2 contact_normal; Wall_Constraint(Simulation *sim, int i, int wall_idx) : Constraint(sim, 1) { this->i = i; this->indicies[0] = i; this->wall_idx = wall_idx; this->collision_margin = (sim->particles[i]->r + sim->walls[wall_idx]->width) * 1.05; this->contact_normal = make_float2(0., 0.); } virtual void project(Particle **particles) { delta_X[0].x = 0.0; delta_X[0].y = 0.0; Wall *wall = sim->walls[wall_idx]; float2 p = particles[i]->X_pred; float2 p_prev = particles[i]->X; const float x_hit = (wall->b * (wall->b * p.x - wall->a * p.y) - wall->ac) / wall->ab_sq; // check if between x0,x1 const float chk = (x_hit - wall->x0.x) / wall->t.x; if (chk <= 1 && chk >= 0) { // float d = distance(particles[i]->X, particles[j]->X); // float f = d - collision_margin; const float y_hit = (wall->a * (-wall->b * p.x + wall->a * p.y) - wall->bc) / wall->ab_sq; // const float s=a*p.x+b*p.y+c; const float d = abs(wall->a * p.x + wall->b * p.y + wall->c) / wall->ab_abs; float f = d - collision_margin; if (f < 0) { contact_normal.x = (p.x - x_hit) / d; contact_normal.y = (p.y - y_hit) / d; delta_X[0].x = -contact_normal.x * f; delta_X[0].y = -contact_normal.y * f; active = true; } /* vec2 dir = start - end; float lngth = length(dir); dir /= lngth; vec2 proj = max(0.0, min(lngth, dot((start - p), dir))) * dir; return length( (start - p) - proj ) - (width / 2.0); */ /* const float y_hit=(wall->a*(-wall->b*p.x+wall->a*p.y)-wall->bc)/wall->ab_sq; //const float s=a*p.x+b*p.y+c; //const float dist=abs(s)/ab_abs; //TODO check if ray p_prev-p lies inside object const float c_p=(p_prev.x-x_hit)*wall->n.x+(p_prev.y-y_hit)*wall->n.y; if(c_p<0) { float s=c_p/(wall->n.x*wall->n.x+wall->n.y*wall->n.y); //project (figure out how to add distance later delta_X[0].x=-wall->n.x*s; delta_X[0].y=-wall->n.y*s; active=false; } */ } } }; // TODO class Ground_Constraint : public Constraint { public: // static const float ground_height=GROUND_HEIGHT; int i; // F=mui Fn = mui mg . 
// static const float // kinematic_friction=30.7*9.81*MS_PER_UPDATE*MS_PER_UPDATE; static const float kinematic_friction = 30000.7 * 9.81 * MS_PER_UPDATE * MS_PER_UPDATE; Ground_Constraint(Simulation *sim, int i) : Constraint(sim, 1) { this->i = i; this->indicies[0] = i; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; float f = distance_ground(particles[i]->X_pred, particles[i]->r, GROUND_HEIGHT); if (f < 0) { // particles[i]->Delta_x_ctr+=1; //TODO remove float x_d = particles[i]->X_pred.x - particles[i]->X.x; if (x_d > kinematic_friction) // if(particles[i]->V.x>kinematic_friction) { delta_X[0].x = -kinematic_friction; } else { delta_X[0].x = -x_d; } delta_X[0].y = f; active = true; } } }; //---------------------------------------- double GetSeconds() { // Figure out time elapsed since last call to idle function static struct timeval last_idle_time; static double time = 0.0; struct timeval time_now; gettimeofday(&time_now, NULL); if (last_idle_time.tv_usec == 0) last_idle_time = time_now; float dt = (float)(time_now.tv_sec - last_idle_time.tv_sec) + 1.0e-6 * (time_now.tv_usec - last_idle_time.tv_usec); time += dt; last_idle_time = time_now; return time; } static void error_callback(int error, const char *description) { fprintf(stderr, "Error %d: %s\n", error, description); } void set_camera(int display_w, int display_h, double rotate_camera) { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(36, (double)display_w / (double)display_h, 0.1, 3000); gluLookAt(0, 180, 50, 0, 0, 0, 0, 1, 0); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); } void drawCircle(float cx, float cy, float r, int num_segments, float3 color) { glColor3f((GLfloat)color.x, (GLfloat)color.y, (GLfloat)color.z); glBegin(GL_POLYGON); for (int ii = 0; ii < num_segments; ii++) { float theta = 2.0f * 3.1635926f * float(ii) / float(num_segments); // get the current angle float x = r * cosf(theta); // calculate the x component float y = r * sinf(theta); // calculate the y component glVertex3f(x + cx, 2., y + cy); // output vertex } glEnd(); } void drawDirection(float cx, float cy, float dirx, float diry) { glColor3f((GLfloat)0.0, (GLfloat)0.0, (GLfloat)0.0); glLineWidth(6.0); // default is 1f glBegin(GL_LINES); glVertex3f(cx, 2., cy); glVertex3f(dirx + cx, 2., diry + cy); glEnd(); } void drawGround(float x1, float y1, float x2, float y2) { glColor3f((GLfloat)0.0, (GLfloat)0.0, (GLfloat)0.0); glLineWidth(4.0); // default is 1f glBegin(GL_LINES); glVertex3f(x1, 2., y1); glVertex3f(x2, 2., y2); glEnd(); } void write_config(Simulation *sim) { FILE *fp_out; std::string path = std::string(OUT_PATH) + std::string("config.txt"); fp_out = fopen(path.c_str(), "w"); if (fp_out != NULL) { fprintf(fp_out, "time_step %f\n", sim->time_step); for (int i = 0; i < sim->num_particles; i++) { fprintf( fp_out, "particle_id %d\tradius %f\tgoal %.3f %.3f\tcolor %.3f %.3f %.3f\n", i, sim->particles[i]->r, sim->particles[i]->goal.x, sim->particles[i]->goal.y, sim->particles[i]->color.x, sim->particles[i]->color.y, sim->particles[i]->color.z); } } fclose(fp_out); } void write_to_file(Simulation *sim) { FILE *fp_out; std::string frame = std::to_string(sim->step_no); std::string path = std::string(OUT_PATH) + frame + std::string(".txt"); fp_out = fopen(path.c_str(), "w"); if (fp_out != NULL) { for (int i = 0; i < sim->num_particles; i++) { fprintf(fp_out, "%d\t%.5f\t%.5f\n", i, sim->particles[i]->X.x, sim->particles[i]->X.y); } } fclose(fp_out); } void 
update(Simulation *sim) { write_to_file(sim); sim->do_time_step(); // sim->do_time_step_force(); } void draw_particles(Simulation *sim) { for (int i = 0; i < sim->num_particles; i++) { drawCircle(sim->particles[i]->X.x, sim->particles[i]->X.y, 1, 15, sim->particles[i]->color); /* drawDirection(sim->particles[i]->X.x,sim->particles[i]->X.y, sim->planner->velocity_buffer[i].x,sim->planner->velocity_buffer[i].y); */ } for (int i = 0; i < sim->num_walls; i++) { Wall *w = sim->walls[i]; drawGround(w->x0.x, w->x0.y, w->x1.x, w->x1.y); } drawGround(-1000.0, GROUND_HEIGHT, 1000.0, GROUND_HEIGHT); drawGround(-1000.0, GRID_UP_BOUND, 1000.0, GRID_UP_BOUND); drawGround(LEFT_BOUND_X, GROUND_HEIGHT, LEFT_BOUND_X, GROUND_HEIGHT - 1000); drawGround(RIGHT_BOUND_X, GROUND_HEIGHT, RIGHT_BOUND_X, GROUND_HEIGHT - 1000); } int render(Simulation *sim) { int display_w, display_h; double prev_time = 0.0; double elapsed = 0.0; double cur_time = 0.0; double lag = 0.0; BYTE pixels[3 * WIDTH * HEIGHT]; glfwSetErrorCallback(error_callback); if (!glfwInit()) return 1; GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, "ImGui Crowd Sim", NULL, NULL); glfwMakeContextCurrent(window); glEnable(GL_DEPTH_TEST); // Depth Testing glDepthFunc(GL_LEQUAL); glDisable(GL_CULL_FACE); glCullFace(GL_BACK); glfwGetWindowSize(window, &display_w, &display_h); glViewport(0, 0, display_w, display_h); glm::vec3 const &clear_color = glm::vec3(0.827f, 0.827f, 0.827f); prev_time = GetSeconds(); // Main loop int i = 0; while (!glfwWindowShouldClose(window)) { glLoadIdentity(); cur_time = GetSeconds(); elapsed = cur_time - prev_time; prev_time = cur_time; lag += elapsed; while (lag >= MS_PER_UPDATE) { lag -= MS_PER_UPDATE; update(sim); } glClearColor(clear_color.x, clear_color.y, clear_color.z, 0.0f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); draw_particles(sim); set_camera(display_w, display_h, 0.0 /*rotate_camera+=0.2*/); glfwSwapBuffers(window); glfwPollEvents(); /* glReadPixels(0,0,display_w,display_h, GL_BGR, GL_UNSIGNED_BYTE, pixels); save_file(pixels,"hello",display_h,display_w); i++; if(i>10) break; */ } glfwTerminate(); return 0; } //----------------------------------------- // rendering float rand_interval(float min, float max) { return min + (max - min) * rand() / RAND_MAX; } float2 rand_float2(float minx, float maxx, float miny, float maxy) { return make_float2(rand_interval(minx, maxx), rand_interval(miny, maxy)); } float deg_to_rad(float deg) { const float PI = 3.1415; while (deg >= 360.) { deg -= 360.; } while (deg <= 0.) { deg += 360.; } return PI / 180. 
* deg; } void dummy_init(Simulation *s) { srand(time(NULL)); int i = 0; float rad = 3.2; float row_init = LEFT_BOUND_X + 3.8 * rad * 50 * 0.5; float height_init = GROUND_HEIGHT - 50; for (int i_ = 0; i_ < ROWS / 2; i_++) { for (int j_ = 0; j_ < COLS; j_++) { float y = height_init - rad * i_ + rand_interval(-0.4, 0.4); float x = row_init + j_ * rad + rand_interval(-0.4, 0.4); s->particles[i] = new Particle(make_float2(x, y), make_float2(0, 0.0), 1.0, 1.0, 0, make_float3(0.0f, 0.0f, 1.0f), make_float2(LEFT_BOUND_X + 2 * rad * 50 * 0.5, y)); i++; } } rad = 3.1; height_init = GROUND_HEIGHT - 50 - 0.11; row_init = LEFT_BOUND_X + 1.4 * rad * 50 * 0.5; for (int i_ = 0; i_ < ROWS / 2; i_++) { for (int j_ = 0; j_ < COLS; j_++) { float y = height_init - rad * i_ + rand_interval(-0.4, 0.4); float x = row_init + j_ * rad + rand_interval(-0.4, 0.4); s->particles[i] = new Particle(make_float2(x, y), make_float2(0, 0.0), 1.0, 1.0, 0, make_float3(1.0f, 0.0f, 0.0f), make_float2(LEFT_BOUND_X + 8 * rad * 50 * 0.5 + x, y)); i++; } } std::vector<particle_tuple *> friction_pairs = get_tuples(s->num_particles); int trig_len = 1 + (s->num_particles * (s->num_particles + 1) / 2); s->stability_upper_trig_arr = (Constraint **)malloc(sizeof(void *) * trig_len); s->collision_upper_trig_arr = (Constraint **)malloc(sizeof(void *) * trig_len); s->powerlaw_upper_trig_arr = (Constraint **)malloc(sizeof(void *) * trig_len); for (std::vector<particle_tuple *>::iterator it = friction_pairs.begin(); it != friction_pairs.end(); ++it) { Stability_Constraint *stab = new Stability_Constraint(s, (*it)->i, (*it)->j); Friction_Constraint *fc = new Friction_Constraint(s, (*it)->i, (*it)->j); Powerlaw_Constraint *pl = new Powerlaw_Constraint(s, (*it)->i, (*it)->j); if ((*it)->i < (*it)->j) { s->collision_map[(*it)->i * s->num_particles + (*it)->j] = fc; int r = (*it)->i; int c = (*it)->j; int t_idx = (s->num_particles * r) + c - (r * (r + 1) * 0.5); s->collision_upper_trig_arr[t_idx] = fc; s->powerlaw_upper_trig_arr[t_idx] = pl; s->stability_upper_trig_arr[t_idx] = stab; } } // set up wall constraints s->num_walls = 2; s->walls = (Wall **)malloc(sizeof(void *) * s->num_walls); s->walls[0] = new Wall(make_float2(-170, GROUND_HEIGHT - 45), make_float2(150., GROUND_HEIGHT - 45), make_float2(0., 1.)); s->walls[1] = new Wall(make_float2(-170, GROUND_HEIGHT - 75), make_float2(150., GROUND_HEIGHT - 75), make_float2(0., -1.)); s->num_constraints = s->num_particles + s->num_particles * s->num_walls; // ground+walls s->constraints = (Constraint **)malloc(sizeof(void *) * s->num_constraints); int constraint_ctr = 0; for (int i = 0; i < s->num_particles; i++) { s->constraints[i] = new Ground_Constraint(s, i); constraint_ctr++; } for (int i = 0; i < s->num_particles; i++) { for (int j = 0; j < s->num_walls; j++) { s->constraints[constraint_ctr] = new Wall_Constraint(s, i, j); constraint_ctr++; } } for (int i = 0; i < s->num_particles; i++) { s->particles[i]->V_pref = V_PREF_ACCEL; float u; do { u = (float)rand() / (float)RAND_MAX; } while (u >= 1.0); s->particles[i]->V_pref += sqrtf(-2.f * logf(1.f - u)) * 0.1f * cosf(2.f * _M_PI * (float)rand() / (float)RAND_MAX); s->planner->calc_pref_v_force(i); s->particles[i]->V.x = s->planner->velocity_buffer[i].x; s->particles[i]->V.y = s->planner->velocity_buffer[i].y; } } int main(int argc, char **argv) { // 0.03 sec - time step, 30 fr/sec int num_particles = ROWS * COLS; Simulation sim(num_particles, 0, MS_PER_UPDATE, "blender.txt"); dummy_init(&sim); write_config(&sim); render(&sim); return 0; }
a85afa686188dc759b37a7c04d26c90ab9a08837.cu
// Position-Based Real-Time Simulation of Large Crowds
// Copyright (c) 2020, Tomer Weiss
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
// IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Original author: Tomer Weiss <http://www.cs.ucla.edu/~tweiss>

#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include <unordered_map>
#include <vector>
#include <stdio.h>

#include <GLFW/glfw3.h>
#include <OpenGL/gl.h>
#include <OpenGL/glu.h>
#include <glm/glm.hpp>
#include <sys/time.h>

#ifdef __CUDACC__
#define CUDA_CALLABLE __host__ __device__
#else
#define CUDA_CALLABLE
#endif

#define OUT_PATH "../out/"

/* ========================= */
/* Simulation Engine Params: */
#define BLOCK_SIZE 256
#define _M_PI 3.14159265358979323846f
#define K_NOT_USED -1
#define _EPSILON 0.00001f
#define EPS 0.0000097
#define MS_PER_UPDATE 0.02 // 0.018
#define KSI 0.01           // 0.0093 //0.005/0.54
#define ALPHA 1.2
#define ITER_COUNT 1
#define MAX_ACCEL 20.0f
#define MAX_SPEED 10.4f
#define V_PREF_ACCEL 1.4f
#define KSI_ACCEL 0.54f
#define NN_ACCEL 10.0f
/* ========================= */
/* Scenario Params: */
#define WIDTH 1280
#define HEIGHT 720
#define ROWS 16
#define COLS 36
#define GROUND_HEIGHT 45.0
#define GRID_UP_BOUND -436.0
#define GRID_LOW_BOND GROUND_HEIGHT + 20
#define LEFT_BOUND_X -285.0
#define RIGHT_BOUND_X 285.0
/* ========================= */

typedef unsigned char BYTE;
typedef unsigned int uint;

// Kernel definition
__global__ void VecAdd(float *A, float *B, float *C) {
  int i = threadIdx.x;
  C[i] = A[i] + B[i];
}

uint iDivUp(uint a, uint b) {
  return (a % b != 0) ?
(a / b + 1) : (a / b); } class particle_tuple { public: int i; int j; particle_tuple(int i, int j) { this->i = i; this->j = j; } }; void save_file(BYTE *pixels, char *file_name, int height, int width) { FILE *imageFile; int x, y; BYTE pixel; imageFile = fopen("image.pgm", "wb"); if (imageFile == NULL) { perror("ERROR: Cannot open output file"); exit(EXIT_FAILURE); } fprintf(imageFile, "P5\n"); // P5 filetype fprintf(imageFile, "%d %d\n", width, height); // dimensions fprintf(imageFile, "255\n"); // Max pixel /* Now write a greyscale ramp */ for (x = 0; x < height; x++) { for (y = 0; y < width; y++) { pixel = pixels[x * height + y]; fputc(pixel, imageFile); } } fclose(imageFile); } // worry about destuctor later std::vector<particle_tuple *> get_tuples(int n) { std::vector<particle_tuple *> tuples; if (n >= 2) { for (int i = 0; i < n; i++) { for (int j = i + 1; j < n; j++) { tuples.push_back(new particle_tuple(i, j)); } } } else { printf("Error: only one particle\n"); } printf("\n"); return tuples; } float min(const float &a, const float &b) { return (a < b) ? a : b; } float norm(const float2 &a) { return sqrtf(a.x * a.x + a.y * a.y); } float distance(const float2 &a, const float2 &b) { return sqrtf((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y)); } float distance_ground(const float2 &a, const float &rad, const float &ground_y) { float res = ground_y - a.y - rad; return res; } float dot(const float2 &a, const float2 &b) { return a.x * b.x + a.y * b.y; } void project_on_vector(const float2 &a, const float2 &b_normalized, float2 &out) { float d = dot(a, b_normalized); out.x = b_normalized.x * d; out.y = b_normalized.y * d; } void clamp(float2 &v, float maxValue) { float lengthV = sqrtf(v.x * v.x + v.y * v.y); if (lengthV > maxValue) { float mult = (maxValue / lengthV); v.x *= mult; v.y *= mult; } } class Wall { public: float2 x0; float2 x1; float2 n; float2 t; float2 t_norm; float a; float b; float c; float ab_sq; float ab_abs; float ac; float bc; float length; float width; Wall(float2 x0, float2 x1, float2 n) { this->x0 = x0; this->x1 = x1; this->n = n; this->t = make_float2(x1.x - x0.x, x1.y - x0.y); this->length = sqrtf(t.x * t.x + t.y * t.y); this->t_norm = make_float2(t.x / length, t.y / length); this->width = 0.05; // TODO fix later this->a = x1.y - x0.y; this->b = x0.x - x1.x; this->c = -(a * x0.x + b * x0.y); this->ab_sq = a * a + b * b; this->ab_abs = sqrtf(a * a + b * b); this->ac = a * c; this->bc = b * c; } }; class Particle { public: float2 X; float2 X_pred; float2 Delta_x; // becomes Delta_buf int Delta_x_ctr; // becomes Delta_buf_ctr float2 V; float2 V_prev; float V_pref; float2 Accel; float mass; float inv_mass; int group; float2 goal; float r; // grid int cell_id; int cell_x; int cell_y; float3 color; Particle(float2 X, float2 V, float mass, float r, int group, float3 color, float2 goal) { this->X = X; this->X_pred = make_float2(X.x, X.y); this->Delta_x = make_float2(0., 0.); this->Delta_x_ctr = 0; this->V = V; this->Accel = make_float2(0., 0.); this->V_prev = make_float2(0., 0.); this->V_pref = V_PREF_ACCEL; this->mass = mass; this->inv_mass = 1.0 / mass; this->group = group; this->goal = goal; this->r = r; // TODO add cell_id, x, y for multiple grids this->cell_id = K_NOT_USED; this->cell_x = K_NOT_USED; this->cell_y = K_NOT_USED; this->color = color; } }; class Simulation; class Grid { public: static const int max_per_cell = 10; int num_particles; int num_cells; int num_rows; int num_cols; float cell_size; float2 min_; float2 max_; int *grid_counters; // stores num 
of particles in each cell int **grid_cells; // stores the particles indicies for each cell // has a maximum number of particles per cell uint num_blocks; uint num_threads; Grid(int num_particles, float dummy_cell_size, float2 min_, float2 max_) { this->num_particles = num_particles; this->cell_size = dummy_cell_size; this->min_ = min_; this->max_ = max_; this->num_cells = (max_.x - min_.x) * (max_.y - min_.y); this->num_cols = (max_.x - min_.x) / cell_size; this->num_rows = (max_.y - min_.y) / cell_size; this->grid_counters = (int *)malloc(num_cells * (sizeof(int))); for (int i = 0; i < num_cells; i++) { this->grid_counters[i] = 0; } this->grid_cells = (int **)malloc(num_cells * (sizeof(int *))); for (int i = 0; i < num_cells; i++) { int *particle_indices = (int *)malloc(max_per_cell * (sizeof(int))); for (int j = 0; j < max_per_cell; j++) { particle_indices[j] = 0; } this->grid_cells[i] = particle_indices; } this->num_threads = min(BLOCK_SIZE, num_particles); this->num_blocks = iDivUp(num_particles, this->num_threads); } void update_stability(Particle **particles) // this is a kernel function { // reset\update grid counters for (int i = 0; i < num_cells; i++) { grid_counters[i] = 0; for (int j = 0; j < max_per_cell; j++) { grid_cells[i][j] = K_NOT_USED; } } // adding particles to grid for (int i = 0; i < num_particles; i++) { float2 X = particles[i]->X; int x = (X.x - min_.x) / cell_size; int y = (X.y - min_.y) / cell_size; int cell_id = y * num_rows + x; particles[i]->cell_id = cell_id; particles[i]->cell_x = x; particles[i]->cell_y = y; int tmp = grid_counters[cell_id]; grid_cells[cell_id][tmp] = i; grid_counters[cell_id] += 1; } } bool is_colliding_stability(Particle **particles, int i, int j) const { float2 X = particles[i]->X; int xi = (X.x - min_.x) / cell_size; int yi = (X.y - min_.y) / cell_size; int cell_id_i = yi * num_rows + xi; X = particles[j]->X; int xj = (X.x - min_.x) / cell_size; int yj = (X.y - min_.y) / cell_size; int cell_id_j = yj * num_rows + xj; int is_x_neighbour = xi - xj; int is_y_neighbour = yi - yj; bool res = is_x_neighbour >= -3 && is_x_neighbour <= 3 && is_y_neighbour >= -3 && is_y_neighbour <= 3; return res; } void update(Particle **particles) // this is a kernel function { // reset\update grid counters for (int i = 0; i < num_cells; i++) { grid_counters[i] = 0; for (int j = 0; j < max_per_cell; j++) { grid_cells[i][j] = K_NOT_USED; } } // adding particles to grid for (int i = 0; i < num_particles; i++) { float2 X = particles[i]->X_pred; int x = (X.x - min_.x) / cell_size; int y = (X.y - min_.y) / cell_size; int cell_id = y * num_rows + x; particles[i]->cell_id = cell_id; particles[i]->cell_x = x; particles[i]->cell_y = y; int tmp = grid_counters[cell_id]; grid_cells[cell_id][tmp] = i; grid_counters[cell_id] += 1; } } /*two options: 1) is colliding should be a 2d matrix that we preprocess in the update step then, is colliding just returns true\false based on that matrix 2) have each particle loop around the surronding cells to see if they are colliding */ bool is_colliding(Particle **particles, int i, int j) const { float2 X = particles[i]->X_pred; int xi = (X.x - min_.x) / cell_size; int yi = (X.y - min_.y) / cell_size; int cell_id_i = yi * num_rows + xi; X = particles[j]->X_pred; int xj = (X.x - min_.x) / cell_size; int yj = (X.y - min_.y) / cell_size; int cell_id_j = yj * num_rows + xj; int is_x_neighbour = xi - xj; int is_y_neighbour = yi - yj; bool res = is_x_neighbour >= -3 && is_x_neighbour <= 3 && is_y_neighbour >= -3 && is_y_neighbour <= 3; return 
res; } ~Grid() { free(grid_counters); for (int i = 0; i < num_cells; i++) { free(grid_cells[i]); } free(grid_cells); } }; class Constraint { // int i1,i2,... praticle indices public: const Simulation *sim; int *indicies; int num_particles; float2 *delta_X; bool active; Constraint(Simulation *sim, int num_particles) { this->sim = sim; this->num_particles = num_particles; this->delta_X = (float2 *)malloc(num_particles * sizeof(float2)); this->indicies = (int *)malloc(num_particles * (sizeof(int))); this->active = false; for (int i = 0; i < num_particles; i++) { delta_X[i] = make_float2(0., 0.); } } virtual void project(Particle **particles) = 0; // forcing implemntation in base class virtual ~Constraint() { free(indicies); free(delta_X); } }; // should be constructed once for each scenerio class PathPlanner { public: int num_particles; Particle **particles; float2 *goals; float2 *velocity_buffer; PathPlanner(int num_particles, Particle **particles) { this->num_particles = num_particles; this->particles = particles; this->velocity_buffer = (float2 *)malloc(sizeof(float2) * num_particles); this->goals = (float2 *)malloc(sizeof(float2) * num_particles); for (int i = 0; i < num_particles; i++) { this->velocity_buffer[i] = make_float2(0., 0.); // this->goals[i]=make_float2(particles[i]->goal.x,particles[i]->goal.y); } } // TODO get current velocity, adjust predicted particle accordinfly for // smoothness void calc_pref_v_force(const int &particle_id) // returns velocity { const Particle *p = this->particles[particle_id]; float2 goal = p->goal; this->velocity_buffer[particle_id].x = goal.x - p->X.x; this->velocity_buffer[particle_id].y = goal.y - p->X.y; const float length = sqrtf(velocity_buffer[particle_id].x * velocity_buffer[particle_id].x + velocity_buffer[particle_id].y * velocity_buffer[particle_id].y); if (length != 0) { this->velocity_buffer[particle_id].x /= length; this->velocity_buffer[particle_id].y /= length; this->velocity_buffer[particle_id].x *= p->V_pref; this->velocity_buffer[particle_id].y *= p->V_pref; } } void calc_velocity(const int &particle_id) // returns velocity { const Particle *p = this->particles[particle_id]; // const float2 goal=p->goal; float2 goal = p->goal; // goal.x=p->X.x; // goal.y=GROUND_HEIGHT; this->velocity_buffer[particle_id].x = goal.x - p->X.x; this->velocity_buffer[particle_id].y = goal.y - p->X.y; const float length = sqrtf(velocity_buffer[particle_id].x * velocity_buffer[particle_id].x + velocity_buffer[particle_id].y * velocity_buffer[particle_id].y); if (length != 0) { this->velocity_buffer[particle_id].x /= length; this->velocity_buffer[particle_id].y /= length; this->velocity_buffer[particle_id].x *= p->V_pref; this->velocity_buffer[particle_id].y *= p->V_pref; // part below needs to be removed // add clamping here! 
/* this->velocity_buffer[particle_id].x=(1.0-KSI)*particles[particle_id]->V.x +KSI*velocity_buffer[particle_id].x; this->velocity_buffer[particle_id].y=(1.0-KSI)*particles[particle_id]->V.y +KSI*velocity_buffer[particle_id].y; */ // clamping v between iterations /* float max_dv_mag = 0.08; float dv_x=this->velocity_buffer[particle_id].x-particles[particle_id]->V_prev.x; float dv_y=this->velocity_buffer[particle_id].y-particles[particle_id]->V_prev.y; float dv_mag=sqrt(dv_x*dv_x+dv_y*dv_y); if(dv_mag>max_dv_mag) { float mult = (max_dv_mag/dv_mag); this->velocity_buffer[particle_id].x*=mult; this->velocity_buffer[particle_id].y*=mult; //printf("%.3f %.3f\n",dv_mag,mult); } */ } } ~PathPlanner() { free(velocity_buffer); free(goals); } }; class Simulation { public: int num_particles; int num_constraints; float time_step; Constraint **constraints; Particle **particles; PathPlanner *planner; Grid *grid; Grid *stability_grid; FILE *out; std::unordered_map<unsigned long long, Constraint *> collision_map; Constraint **collision_upper_trig_arr; Constraint **powerlaw_upper_trig_arr; Constraint **stability_upper_trig_arr; int step_no; float friction_constraint_stiffness; int num_walls; Wall **walls; Simulation(int num_particles, int num_constraints, float time_step, char *out_path) { this->num_particles = num_particles; this->time_step = time_step; this->particles = (Particle **)malloc(sizeof(void *) * num_particles); this->planner = new PathPlanner(num_particles, this->particles); this->out = fopen(out_path, "w"); this->num_constraints = 0; this->constraints = NULL; this->collision_map = std::unordered_map<unsigned long long, Constraint *>(); this->collision_upper_trig_arr = NULL; this->powerlaw_upper_trig_arr = NULL; this->stability_upper_trig_arr = NULL; this->grid = new Grid(num_particles, 2.66, make_float2(LEFT_BOUND_X - 50, GRID_UP_BOUND - 10), make_float2(RIGHT_BOUND_X + 50, GRID_LOW_BOND)); /* this->stability_grid=new Grid(num_particles,2.16, //5.66, //7.66, make_float2(LEFT_BOUND_X-50,GRID_UP_BOUND-10), make_float2(RIGHT_BOUND_X+50,GRID_LOW_BOND)); */ this->stability_grid = new Grid(num_particles, 2.2, // 5.66, //7.66, make_float2(LEFT_BOUND_X - 50, GRID_UP_BOUND - 10), make_float2(RIGHT_BOUND_X + 50, GRID_LOW_BOND)); this->step_no = 1; this->friction_constraint_stiffness = 0.22f; this->num_walls = 0; this->walls = NULL; } void calc_constraint_stiffness(int n) { // 1.-(1.-0.25)**(4./6) friction_constraint_stiffness = 1.0f - powf(1.0f - friction_constraint_stiffness, (1.0f / n)); } void stabilization() { stability_grid->update_stability(particles); for (int i = 0; i < 1; i++) { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } // friction constraints for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); stability_upper_trig_arr[t_idx]->project(particles); } } } } } } } } // traverse friction constraints to 
accumalte deltas for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (stability_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < stability_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = stability_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += stability_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += stability_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } stability_upper_trig_arr[t_idx]->active = false; } } } } } } } } } for (int i = 0; i < num_particles; i++) { if (particles[i]->Delta_x_ctr > 0) { float dx = ALPHA * particles[i]->Delta_x.x / particles[i]->Delta_x_ctr; float dy = ALPHA * particles[i]->Delta_x.y / particles[i]->Delta_x_ctr; particles[i]->X_pred.x += dx; particles[i]->X_pred.y += dy; particles[i]->X.x += dx; particles[i]->X.y += dy; } } } } void project_velocity_constraints() { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -2; x <= 2; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -2; y <= 2; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); powerlaw_upper_trig_arr[t_idx]->project(particles); } } } } } } } } // traverse friction constraints to accumalte deltas for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -2; x <= 2; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -2; y <= 2; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (powerlaw_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < powerlaw_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = powerlaw_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } powerlaw_upper_trig_arr[t_idx]->active = false; } } } } } } } } } for (int i = 0; i < num_particles; i++) { particles[i]->V.x *= 0.99; particles[i]->V.y *= 0.99; float k = 1.; // 0.05; 
//stiffness changes with iteration; float2 dv_pref = particles[i]->V_prev; dv_pref.x = k * (planner->velocity_buffer[i].x - particles[i]->V.x); dv_pref.y = k * (planner->velocity_buffer[i].y - particles[i]->V.y); clamp(dv_pref, time_step * MAX_ACCEL); if (particles[i]->Delta_x_ctr > 0) { float dvx = (dv_pref.x + particles[i]->Delta_x.x) / (1. + particles[i]->Delta_x_ctr); float dvy = (dv_pref.y + particles[i]->Delta_x.y) / (1. + particles[i]->Delta_x_ctr); particles[i]->V.x += dvx; particles[i]->V.y += dvy; } else { particles[i]->V.x += dv_pref.x; particles[i]->V.y += dv_pref.y; } // clamp(particles[i]->V,MAX_SPEED); particles[i]->X_pred.x = particles[i]->X.x + particles[i]->V.x * time_step; particles[i]->X_pred.y = particles[i]->X.y + particles[i]->V.y * time_step; // perhaps clamp cannot be here, but rather in the constraints themselves // so to force that each constraint cannot become // TODO also need to clamp maximum speed change clamp } } void project_constraints() { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } // friction constraints for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { // collision_map[i * num_particles + j]->project(particles); int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); collision_upper_trig_arr[t_idx]->project(particles); // powerlaw_upper_trig_arr[t_idx]->project(particles); // stability_upper_trig_arr[t_idx]->project(particles); } } } } } } } } // ground constraints for (int i = 0; i < num_constraints; i++) { constraints[i]->project(particles); } // traverse friction constraints to accumalte deltas for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -1; x <= 1; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < grid->num_cols) { for (int y = -1; y <= 1; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * grid->num_rows); if (grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < grid->grid_counters[cell_id]; idx++) { int j = grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (collision_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < collision_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = collision_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += collision_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += collision_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } collision_upper_trig_arr[t_idx]->active = false; } /* if(powerlaw_upper_trig_arr[t_idx]->active) { for(int ctr=0; ctr<powerlaw_upper_trig_arr[t_idx]->num_particles ;ctr++) { int p_idx=powerlaw_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += powerlaw_upper_trig_arr[t_idx]->delta_X[ctr].y; 
particles[p_idx]->Delta_x_ctr++; } powerlaw_upper_trig_arr[t_idx]->active=false; } */ } } } } } } } } /* for(std::unordered_map<unsigned long long, Constraint*>::const_iterator it = collision_map.cbegin(); it != collision_map.cend(); ++it) { if(it->second->active) { //printf("Super Happy active constraint :-)\n"); for(int j=0;j<it->second->num_particles;j++) { int idx=it->second->indicies[j]; particles[idx]->Delta_x.x += it->second->delta_X[j].x; particles[idx]->Delta_x.y += it->second->delta_X[j].y; particles[idx]->Delta_x_ctr++; } } } */ // traverse ground and wall constraints to accumalte deltas for (int i = 0; i < num_constraints; i++) { if (constraints[i]->active) { for (int j = 0; j < constraints[i]->num_particles; j++) { int idx = constraints[i]->indicies[j]; particles[idx]->Delta_x.x += constraints[i]->delta_X[j].x; particles[idx]->Delta_x.y += constraints[i]->delta_X[j].y; particles[idx]->Delta_x_ctr++; } constraints[i]->active = false; } } for (int i = 0; i < num_particles; i++) { if (particles[i]->Delta_x_ctr > 0) { particles[i]->X_pred.x += ALPHA * particles[i]->Delta_x.x / particles[i]->Delta_x_ctr; particles[i]->X_pred.y += ALPHA * particles[i]->Delta_x.y / particles[i]->Delta_x_ctr; // clamp if (false) { float maxValue = 0.069; float length_d_i = distance(particles[i]->X_pred, particles[i]->X); if (length_d_i > maxValue) { float mult = (maxValue / length_d_i); particles[i]->X_pred.x = particles[i]->X.x + (particles[i]->X_pred.x - particles[i]->X.x) * mult; particles[i]->X_pred.y = particles[i]->X.y + (particles[i]->X_pred.y - particles[i]->X.y) * mult; } /* float max_dv_mag = 0.0013; float curr_v_x=((particles[i]->X_pred.x-particles[i]->X.x)/time_step); float curr_v_y=((particles[i]->X_pred.y-particles[i]->X.y)/time_step); float dv_x=curr_v_x-particles[i]->V_prev.x; float dv_y=curr_v_y-particles[i]->V_prev.y; float dv_mag=sqrt(dv_x*dv_x+dv_y*dv_y); if(dv_mag>max_dv_mag) { float mult = (max_dv_mag/dv_mag); particles[i]->X_pred.x=particles[i]->X.x+curr_v_x*mult; particles[i]->X_pred.y=particles[i]->X.y+curr_v_y*mult; //printf("%.3f %.3f\n",dv_mag,mult); } */ } } } } void do_time_step_force() { printf("Force Solve Frame %d\n", step_no); for (int i = 0; i < num_particles; i++) { planner->calc_pref_v_force(i); particles[i]->V_prev.x = planner->velocity_buffer[i].x; particles[i]->V_prev.y = planner->velocity_buffer[i].y; } // TODO change grid cell size stability_grid->update_stability(particles); // update grid by current positions // TODO calculate preffered speed //_vPref *= _prefSpeed/sqrtf(distSqToGoal); for (int i = 0; i < 1; i++) { for (int i = 0; i < num_particles; i++) { particles[i]->Delta_x.x = 0.; particles[i]->Delta_x.y = 0.; particles[i]->Delta_x_ctr = 0; } for (int i = 0; i < num_particles; i++) { // iterate over adjacent cells for (int x = -3; x <= 3; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -3; y <= 3; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); // printf("Will project %d %d\n",i,j); stability_upper_trig_arr[t_idx]->project(particles); } } } } } } } } for (int i = 0; i < num_particles; i++) { // 
iterate over adjacent cells for (int x = -3; x <= 3; x++) { int cur_x = particles[i]->cell_x + x; if (cur_x >= 0 && cur_x < stability_grid->num_cols) { for (int y = -3; y <= 3; y++) { int cur_y = particles[i]->cell_y + y; if (cur_y >= 0 && cur_y < stability_grid->num_rows) { int cell_id = particles[i]->cell_id + x + (y * stability_grid->num_rows); if (stability_grid->grid_counters[cell_id] > 0) { for (int idx = 0; idx < stability_grid->grid_counters[cell_id]; idx++) { int j = stability_grid->grid_cells[cell_id][idx]; if (i < j) // so only do collision once { int t_idx = (num_particles * i) + j - (i * (i + 1) * 0.5); if (stability_upper_trig_arr[t_idx]->active) { for (int ctr = 0; ctr < stability_upper_trig_arr[t_idx]->num_particles; ctr++) { int p_idx = stability_upper_trig_arr[t_idx]->indicies[ctr]; particles[p_idx]->Delta_x.x += stability_upper_trig_arr[t_idx]->delta_X[ctr].x; particles[p_idx]->Delta_x.y += stability_upper_trig_arr[t_idx]->delta_X[ctr].y; particles[p_idx]->Delta_x_ctr++; } stability_upper_trig_arr[t_idx]->active = false; } } } } } } } } } for (int i = 0; i < num_particles; i++) { particles[i]->Accel.x = (particles[i]->V_prev.x - particles[i]->V.x) / KSI_ACCEL; particles[i]->Accel.y = (particles[i]->V_prev.y - particles[i]->V.y) / KSI_ACCEL; if (particles[i]->Delta_x_ctr > 0) { /* define force constraint checks distance, tao. if distance < Nearest Neighbour calculate Delta_force for each particle */ // accumalte and average delta forces for each particle // apply acceleration clamp // update velocity and positions // printf("In particles step %d\n",i); particles[i]->Accel.x += particles[i]->Delta_x.x / particles[i]->Delta_x_ctr; particles[i]->Accel.y += particles[i]->Delta_x.y / particles[i]->Delta_x_ctr; clamp(particles[i]->Accel, MAX_ACCEL); } particles[i]->V.x += particles[i]->Accel.x * time_step; particles[i]->V.y += particles[i]->Accel.y * time_step; printf("%d Speed %.4f\n", i, sqrtf(particles[i]->V.x * particles[i]->V.x + particles[i]->V.y * particles[i]->V.y)); particles[i]->X.x += particles[i]->V.x * time_step; particles[i]->X.y += particles[i]->V.y * time_step; } } step_no++; } void do_time_step() { printf("PBD Solve Frame %d\n", step_no); for (int i = 0; i < num_particles; i++) { planner->calc_velocity(i); // particles[i]->V.x=planner->velocity_buffer[i].x; // particles[i]->V.y=planner->velocity_buffer[i].y; particles[i]->V_prev.x = particles[i]->V.x; particles[i]->V_prev.y = particles[i]->V.y; // For sand simulation: // particles[i]->V.y+=time_step*9.81; //times mass TODO? 
// particles[i]->V.x*=0.99; // particles[i]->V.y*=0.99; // printf("particle %d // speed=(%.2f,%.2f)\n",i,particles[i]->V.x,particles[i]->V.y); } for (int i = 0; i < num_particles; i++) { particles[i]->X_pred.x += time_step * particles[i]->V.x; particles[i]->X_pred.y += time_step * particles[i]->V.y; } //----------------------stability grid stuff // stabilization(); //-----------------------project constraints grid->update(particles); project_velocity_constraints(); for (int i = 1; i < (ITER_COUNT + 1); i++) { calc_constraint_stiffness(i); project_constraints(); } for (int i = 0; i < num_particles; i++) { float dx = particles[i]->X_pred.x - particles[i]->X.x; float dy = particles[i]->X_pred.y - particles[i]->X.y; particles[i]->V.x = dx / time_step; particles[i]->V.y = dy / time_step; particles[i]->X.x = particles[i]->X_pred.x; particles[i]->X.y = particles[i]->X_pred.y; } step_no++; } ~Simulation() { fclose(this->out); for (int i = 0; i < num_particles; i++) { delete particles[i]; } for (int i = 0; i < num_constraints; i++) { delete constraints[i]; } /* for (std::unordered_map<unsigned long long, Constraint*>::const_iterator it = collision_map.begin(); it != collision_map.end(); ++it) { delete it->second; } */ if (walls != NULL) { for (int i = 0; i < num_walls; i++) { delete walls[i]; } } int trig_len = 1 + (num_particles * (num_particles + 1) / 2); for (int i = 0; i < num_particles; i++) { for (int j = 0; j < num_particles; j++) { if (i < j) { int r = i; int c = j; int t_idx = (num_particles * r) + c - (r * (r + 1) * 0.5); if (collision_upper_trig_arr != NULL) { delete collision_upper_trig_arr[t_idx]; } if (powerlaw_upper_trig_arr != NULL) { delete powerlaw_upper_trig_arr[t_idx]; } if (stability_upper_trig_arr != NULL) { delete stability_upper_trig_arr[t_idx]; } } } } free(constraints); free(particles); free(collision_upper_trig_arr); free(powerlaw_upper_trig_arr); delete planner; delete grid; } }; class Stability_Constraint : public Constraint { public: int i; int j; float w_i_coef; float w_j_coef; float2 contact_normal; float2 tangential_displacement; float2 x_pred_w_delta1; float2 x_pred_w_delta2; float2 out; float collision_margin; Stability_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->w_j_coef = -sim->particles[j]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->contact_normal = make_float2(0.0, 0.0); this->tangential_displacement = make_float2(0.0, 0.0); this->x_pred_w_delta1 = make_float2(0., 0.); this->x_pred_w_delta2 = make_float2(0., 0.); this->out = make_float2(0., 0.); } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; if (true) { float d = distance(particles[i]->X, particles[j]->X); float f = d - collision_margin; if (f < 0) { contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (particles[i]->X.x - particles[j]->X.x) / d; contact_normal.y = (particles[i]->X.y - 
particles[j]->X.y) / d; delta_X[0].x = -w_i_coef * contact_normal.x * f; delta_X[0].y = -w_i_coef * contact_normal.y * f; delta_X[1].x = -w_j_coef * contact_normal.x * f; delta_X[1].y = -w_j_coef * contact_normal.y * f; active = true; } } } }; class Powerlaw_Force_Constraint : public Constraint { public: static const float k = 1.5f; // stiffness static const float m = 2.0f; static const float tao0 = 3.f; int i; int j; float w_i_coef; float w_j_coef; float2 out; float collision_margin; float radius_init; float radius_sq_init; float delta_t; float dv_i; float dv_j; Powerlaw_Force_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; this->w_i_coef = sim->particles[i]->inv_mass; this->w_j_coef = sim->particles[j]->inv_mass; this->out = make_float2(0., 0.); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->radius_init = (sim->particles[i]->r + sim->particles[j]->r); this->radius_sq_init = radius_init * radius_init; this->delta_t = sim->time_step; this->dv_i = 1. / delta_t; this->dv_j = -1. / delta_t; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; float2 x_i = particles[i]->X; float2 x_j = particles[j]->X; const float dist = distance(particles[i]->X, particles[j]->X); float radius_sq = radius_sq_init; if (dist < radius_init) { radius_sq = (radius_init - dist) * (radius_init - dist); } /* float v_x=(particles[i]->X_pred.x-x_i.x)/delta_t -(particles[j]->X_pred.x-x_j.x)/delta_t; float v_y=(particles[i]->X_pred.y-x_i.y)/delta_t -(particles[j]->X_pred.y-x_j.y)/delta_t; */ float v_x = particles[i]->V.x - particles[j]->V.x; float v_y = particles[i]->V.y - particles[j]->V.y; float x0 = x_i.x - x_j.x; float y0 = x_i.y - x_j.y; float v_sq = v_x * v_x + v_y * v_y; float x0_sq = x0 * x0; float y0_sq = y0 * y0; float x_sq = x0_sq + y0_sq; float a = v_sq; float b = -v_x * x0 - v_y * y0; float b_sq = b * b; float c = x_sq - radius_sq; float d_sq = b_sq - a * c; if (d_sq > 0 && (a < -_EPSILON || a > _EPSILON)) { float d = sqrtf(d_sq); float tao = (b - d) / a; if (dist < NN_ACCEL && tao > 0) { float c_x_nom = (v_sq * x0 + b * v_x) / d; float c_x = v_x - c_x_nom; float c_y_nom = (v_sq * y0 + b * v_y) / d; float c_y = v_y - c_y_nom; float F_s = -k * exp(-tao / tao0) / (a * powf(tao, m)) * (m / tao + 1. 
/ tao0); float F_x = c_x * F_s; float F_y = c_y * F_s; delta_X[0].x = F_x; delta_X[0].y = F_y; delta_X[1].x = -F_x; delta_X[1].y = -F_y; active = true; } } } }; class Powerlaw_Constraint : public Constraint { public: static const float k = 1.5; // stiffness static const float tao0 = 4.; static const float maxValue = 0.2; // delta_t * pref_speed int i; int j; float w_i_coef; float w_j_coef; float2 out; float collision_margin; float radius_init; float radius_sq_init; float delta_t; float dv_i; float dv_j; float max_acceleration; Powerlaw_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass; this->w_j_coef = sim->particles[j]->inv_mass; this->out = make_float2(0., 0.); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->radius_init = (sim->particles[i]->r + sim->particles[j]->r); this->radius_sq_init = radius_init * radius_init; this->delta_t = sim->time_step; this->dv_i = 1.; // 1./delta_t; this->dv_j = -1.; //-1./delta_t; this->max_acceleration = sim->time_step * MAX_ACCEL; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; float2 x_i = particles[i]->X; float2 x_j = particles[j]->X; const float dist = distance(particles[i]->X, particles[j]->X); float radius_sq = radius_sq_init; if (dist < radius_init) { radius_sq = (radius_init - dist) * (radius_init - dist); } float v_x = (particles[i]->X_pred.x - x_i.x) / delta_t - (particles[j]->X_pred.x - x_j.x) / delta_t; float v_y = (particles[i]->X_pred.y - x_i.y) / delta_t - (particles[j]->X_pred.y - x_j.y) / delta_t; float x0 = x_i.x - x_j.x; float y0 = x_i.y - x_j.y; float v_sq = v_x * v_x + v_y * v_y; float x0_sq = x0 * x0; float y0_sq = y0 * y0; float x_sq = x0_sq + y0_sq; float a = v_sq; float b = -v_x * x0 - v_y * y0; float b_sq = b * b; float c = x_sq - radius_sq; float d_sq = b_sq - a * c; if (false && d_sq > 0 && (a < -_EPSILON || a > _EPSILON)) { float d = sqrtf(d_sq); float tao = (b - d) / a; float tao_alt = (b + d) / a; // pick the min solution that is > 0 tao = tao_alt < tao && tao_alt > 0 ? tao_alt : tao; // need to consider +- sign perhaps? if (tao > 0 /* && tao<tao0 */) { float clamp_tao = exp(-tao * tao / tao0); float c_tao = abs(tao - tao0); float tao_sq = c_tao * c_tao; float grad_x_i = 2 * c_tao * ((dv_i / a) * ((-2. * v_x * tao) - (x0 + (v_y * x0 * y0 + v_x * (radius_sq - y0_sq)) / d))); float grad_y_i = 2 * c_tao * ((dv_i / a) * ((-2. 
* v_y * tao) - (y0 + (v_x * x0 * y0 + v_y * (radius_sq - x0_sq)) / d))); float grad_x_j = -grad_x_i; float grad_y_j = -grad_y_i; float stiff = exp(-tao * tao / tao0); float s = 0.5 * tao_sq / (particles[i]->inv_mass * (grad_y_i * grad_y_i + grad_x_i * grad_x_i) + particles[j]->inv_mass * (grad_y_j * grad_y_j + grad_x_j * grad_x_j)); active = true; delta_X[0].x = s * w_i_coef * grad_x_i; delta_X[0].y = s * w_i_coef * grad_y_i; clamp(delta_X[0], max_acceleration); delta_X[1].x = s * w_j_coef * grad_x_j; delta_X[1].y = s * w_j_coef * grad_y_j; clamp(delta_X[1], max_acceleration); } if (false && tao > 0) { float clamp_tao = exp(-tao * tao / tao0); float c_x_nom = (v_sq * x0 + b * v_x) / d; float c_x = v_x - c_x_nom; float c_y_nom = (v_sq * y0 + b * v_y) / d; float c_y = v_y - c_y_nom; float grad_x_i = dv_i + (v_y * y0 * dv_i / d) + ((v_y * x0 * y0 + (radius_sq - y0_sq) * v_x) * dv_i * c_x_nom / d_sq); float grad_y_i = dv_i + (v_x * x0 * dv_i / d) + ((v_x * x0 * y0 + (radius_sq - x0_sq) * v_y) * dv_i * c_y_nom / d_sq); float grad_x_j = dv_j + (v_y * y0 * dv_j / d) + ((v_y * x0 * y0 + (radius_sq - y0_sq) * v_x) * dv_j * c_x_nom / d_sq); float grad_y_j = dv_j + (v_x * x0 * dv_j / d) + ((v_x * x0 * y0 + (radius_sq - x0_sq) * v_y) * dv_j * c_y_nom / d_sq); grad_x_i *= 2 * abs(c_x); grad_y_i *= 2 * abs(c_y); grad_x_j *= 2 * abs(c_x); grad_y_j *= 2 * abs(c_y); c_x *= c_x; c_y *= c_y; float s_fin = c_x + c_y; float stiff = ((2. / tao) + (1. / tao0)) * k * exp(-tao / tao0) / (v_sq * tao * tao); // float s_x=stiff*c_x/(particles[i]->inv_mass*grad_x_i*grad_x_i // +particles[j]->inv_mass*grad_x_j*grad_x_j); // float s_y=stiff*c_y/(particles[i]->inv_mass*grad_y_i*grad_y_i // +particles[j]->inv_mass*grad_y_j*grad_y_j); // s_x*=s_x; // s_y*=s_y; stiff = exp(-tao * tao / tao0); float s = 0.39 * s_fin / (particles[i]->inv_mass * (grad_y_i * grad_y_i + grad_x_i * grad_x_i) + particles[j]->inv_mass * (grad_y_j * grad_y_j + grad_x_j * grad_x_j)); // grad_y_i=%f\n",stiff,s,grad_x_i,grad_y_i); active = true; delta_X[0].x = s * w_i_coef * grad_x_i; delta_X[0].y = s * w_i_coef * grad_y_i; clamp(delta_X[0], max_acceleration); delta_X[1].x = s * w_j_coef * grad_x_j; delta_X[1].y = s * w_j_coef * grad_y_j; clamp(delta_X[1], max_acceleration); } } } }; class Friction_Constraint2 : public Constraint { public: // usually easier to keep object moving than start movement, so mu_s>mu_k // some friction results // http://hypertextbook.com/facts/2007/TabraizRasul.shtml static const float mui_static = 0.0f; // 0.00023; //0.021; static const float mui_kinematic = 0.0f; // 0.00017; //0.02; /*typical values: http://spiff.rit.edu/classes/phys211/lectures/fric/fric_all.html smooth 0.05 medium 0.3 rough 1.0 */ int i; int j; float w_i_coef; float w_j_coef; float2 contact_normal; float2 tangential_displacement; float2 x_pred_w_delta1; float2 x_pred_w_delta2; float2 out; float collision_margin; Friction_Constraint2(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->w_j_coef = -sim->particles[j]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->contact_normal = make_float2(0.0, 0.0); this->tangential_displacement = make_float2(0.0, 0.0); 
this->x_pred_w_delta1 = make_float2(0., 0.); this->x_pred_w_delta2 = make_float2(0., 0.); this->out = make_float2(0., 0.); } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; if (true) { float d = distance(particles[i]->X_pred, particles[j]->X_pred); float f = d - collision_margin; if (f < 0) { contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (particles[i]->X_pred.x - particles[j]->X_pred.x) / d; contact_normal.y = (particles[i]->X_pred.y - particles[j]->X_pred.y) / d; delta_X[0].x = -w_i_coef * contact_normal.x * f; delta_X[0].y = -w_i_coef * contact_normal.y * f; delta_X[1].x = -w_j_coef * contact_normal.x * f; delta_X[1].y = -w_j_coef * contact_normal.y * f; x_pred_w_delta1.x = delta_X[0].x + particles[i]->X_pred.x; x_pred_w_delta1.y = delta_X[0].y + particles[i]->X_pred.y; x_pred_w_delta2.x = delta_X[1].x + particles[j]->X_pred.x; x_pred_w_delta2.y = delta_X[1].y + particles[j]->X_pred.y; float n_norm = distance(x_pred_w_delta1, x_pred_w_delta2); contact_normal.y = (x_pred_w_delta1.x - x_pred_w_delta2.x) / n_norm; contact_normal.x = -(x_pred_w_delta1.y - x_pred_w_delta2.y) / n_norm; // tangential_displacement.x = x_pred_w_delta1.x-x_pred_w_delta2.x; // tangential_displacement.y = x_pred_w_delta1.y-x_pred_w_delta2.y; // Above might be wrong // should be tangential_displacement.x = x_pred_w_delta1.x - particles[i]->X.x - (x_pred_w_delta2.x - particles[j]->X.x); tangential_displacement.y = x_pred_w_delta1.y - particles[i]->X.y - (x_pred_w_delta2.y - particles[j]->X.y); project_on_vector(tangential_displacement, contact_normal, out); float out_norm = norm(out); if (out_norm >= mui_static * d) { float coef = min(1., mui_kinematic * d / out_norm); out.x *= coef; out.y *= coef; } delta_X[0].x += -out.x * w_i_coef; delta_X[0].y += -out.y * w_i_coef; delta_X[1].x += -out.x * w_j_coef; delta_X[1].y += -out.y * w_j_coef; active = true; } } } }; class Friction_Constraint : public Constraint { public: // usually easier to keep object moving than start movement, so mu_s>mu_k // some friction results // http://hypertextbook.com/facts/2007/TabraizRasul.shtml static const float mui_static = 0.00026; // 0.021; static const float mui_kinematic = 0.00023; // 0.02; /*typical values: http://spiff.rit.edu/classes/phys211/lectures/fric/fric_all.html smooth 0.05 medium 0.3 rough 1.0 */ int i; int j; float w_i_coef; float w_j_coef; float2 contact_normal; float2 tangential_displacement; float2 x_pred_w_delta1; float2 x_pred_w_delta2; float2 out; float collision_margin; float radius_sq_init; float radius_init; float delta_t; Friction_Constraint(Simulation *sim, int i, int j) : Constraint(sim, 2) { this->i = i; this->j = j; this->indicies[0] = i; this->indicies[1] = j; // TODO if seg fault happens, it is because the particles are set up after // the constraints this->w_i_coef = sim->particles[i]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->w_j_coef = -sim->particles[j]->inv_mass / (sim->particles[i]->inv_mass + sim->particles[j]->inv_mass); this->collision_margin = (sim->particles[i]->r + sim->particles[j]->r) * 1.05f; this->contact_normal = make_float2(0.0, 0.0); this->tangential_displacement = make_float2(0.0, 0.0); this->x_pred_w_delta1 = make_float2(0., 0.); 
this->x_pred_w_delta2 = make_float2(0., 0.); this->out = make_float2(0., 0.); this->radius_init = (sim->particles[i]->r + sim->particles[j]->r); this->radius_sq_init = radius_init * radius_init; this->delta_t = sim->time_step; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; delta_X[1].x = 0.0; delta_X[1].y = 0.0; float d = distance(particles[i]->X_pred, particles[j]->X_pred); float f = d - collision_margin; if (f < 0) { contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (particles[i]->X_pred.x - particles[j]->X_pred.x) / d; contact_normal.y = (particles[i]->X_pred.y - particles[j]->X_pred.y) / d; delta_X[0].x = -w_i_coef * contact_normal.x * f; delta_X[0].y = -w_i_coef * contact_normal.y * f; delta_X[1].x = -w_j_coef * contact_normal.x * f; delta_X[1].y = -w_j_coef * contact_normal.y * f; x_pred_w_delta1.x = delta_X[0].x + particles[i]->X_pred.x; x_pred_w_delta1.y = delta_X[0].y + particles[i]->X_pred.y; x_pred_w_delta2.x = delta_X[1].x + particles[j]->X_pred.x; x_pred_w_delta2.y = delta_X[1].y + particles[j]->X_pred.y; float n_norm = distance(x_pred_w_delta1, x_pred_w_delta2); contact_normal.y = (x_pred_w_delta1.x - x_pred_w_delta2.x) / n_norm; contact_normal.x = -(x_pred_w_delta1.y - x_pred_w_delta2.y) / n_norm; // tangential_displacement.x = x_pred_w_delta1.x-x_pred_w_delta2.x; // tangential_displacement.y = x_pred_w_delta1.y-x_pred_w_delta2.y; // Above might be wrong // should be tangential_displacement.x = x_pred_w_delta1.x - particles[i]->X.x - (x_pred_w_delta2.x - particles[j]->X.x); tangential_displacement.y = x_pred_w_delta1.y - particles[i]->X.y - (x_pred_w_delta2.y - particles[j]->X.y); project_on_vector(tangential_displacement, contact_normal, out); float out_norm = norm(out); if (out_norm >= mui_static * d) { float coef = min(1., mui_kinematic * d / out_norm); out.x *= coef; out.y *= coef; } delta_X[0].x += -out.x * w_i_coef; delta_X[0].y += -out.y * w_i_coef; delta_X[1].x += -out.x * w_j_coef; delta_X[1].y += -out.y * w_j_coef; active = true; } else { float2 x_i = particles[i]->X; float2 x_j = particles[j]->X; const float dist = distance(particles[i]->X, particles[j]->X); float radius_sq = radius_sq_init; if (dist < radius_init) { radius_sq = (radius_init - dist) * (radius_init - dist); } const float v_ix = (particles[i]->X_pred.x - x_i.x) / delta_t; const float v_jx = (particles[j]->X_pred.x - x_j.x) / delta_t; const float v_x = v_ix - v_jx; const float v_iy = (particles[i]->X_pred.y - x_i.y) / delta_t; const float v_jy = (particles[j]->X_pred.y - x_j.y) / delta_t; const float v_y = v_iy - v_jy; float x0 = x_i.x - x_j.x; float y0 = x_i.y - x_j.y; float v_sq = v_x * v_x + v_y * v_y; float x0_sq = x0 * x0; float y0_sq = y0 * y0; float x_sq = x0_sq + y0_sq; float a = v_sq; float b = -v_x * x0 - v_y * y0; float b_sq = b * b; float c = x_sq - radius_sq; float d_sq = b_sq - a * c; if (d_sq > 0 && (a < -_EPSILON || a > _EPSILON)) { float d = sqrtf(d_sq); float tao = (b - d) / a; float tao_alt = (b + d) / a; // pick the min solution that is > 0 tao = tao_alt < tao && tao_alt > 0 ? tao_alt : tao; // need to consider +- sign perhaps? 
if (tao > 0) { // const float min_tao_init=b/v_sq; const float min_tao = tao + delta_t; // min_tao_init;//(min_tao_init+tao)/2; const float x_i_min = x_i.x + min_tao * v_ix; const float y_i_min = x_i.y + min_tao * v_iy; const float x_j_min = x_j.x + min_tao * v_jx; const float y_j_min = x_j.y + min_tao * v_jy; float min_tao_dist = sqrtf((x_i_min - x_j_min) * (x_i_min - x_j_min) + (y_i_min - y_j_min) * (y_i_min - y_j_min)); float d = min_tao_dist; float f = d - collision_margin; if (f < 0 && d > _EPSILON) { const float clamp_tao = exp(-min_tao * min_tao / 5.); const float k = sim->friction_constraint_stiffness; // 0.25; contact_normal.x = 0.; contact_normal.y = 0.; tangential_displacement.x = 0.; tangential_displacement.y = 0.; x_pred_w_delta1.x = 0.; x_pred_w_delta1.y = 0.; x_pred_w_delta2.x = 0.; x_pred_w_delta2.y = 0.; out.x = 0.; out.y = 0.; contact_normal.x = (x_i_min - x_j_min) / d; contact_normal.y = (y_i_min - y_j_min) / d; delta_X[0].x = -k * clamp_tao * w_i_coef * contact_normal.x * f; delta_X[0].y = -k * clamp_tao * w_i_coef * contact_normal.y * f; delta_X[1].x = -k * clamp_tao * w_j_coef * contact_normal.x * f; delta_X[1].y = -k * clamp_tao * w_j_coef * contact_normal.y * f; active = true; const float x_i_tao = x_i.x + tao * v_ix; const float y_i_tao = x_i.y + tao * v_iy; const float x_j_tao = x_j.x + tao * v_jx; const float y_j_tao = x_j.y + tao * v_jy; x_pred_w_delta1.x = delta_X[0].x + x_i_min; x_pred_w_delta1.y = delta_X[0].y + y_i_min; x_pred_w_delta2.x = delta_X[1].x + x_j_min; x_pred_w_delta2.y = delta_X[1].y + y_j_min; float n_norm = distance(x_pred_w_delta1, x_pred_w_delta2); contact_normal.y = (x_pred_w_delta1.x - x_pred_w_delta2.x) / n_norm; contact_normal.x = -(x_pred_w_delta1.y - x_pred_w_delta2.y) / n_norm; tangential_displacement.x = x_pred_w_delta1.x - x_i_tao - (x_pred_w_delta2.x - x_j_tao); tangential_displacement.y = x_pred_w_delta1.y - y_i_tao - (x_pred_w_delta2.y - y_j_tao); project_on_vector(tangential_displacement, contact_normal, out); float out_norm = norm(out); if (out_norm >= mui_static * d) { float coef = min(1., mui_kinematic * d / out_norm); out.x *= coef; out.y *= coef; } delta_X[0].x += -out.x * w_i_coef; delta_X[0].y += -out.y * w_i_coef; delta_X[1].x += -out.x * w_j_coef; delta_X[1].y += -out.y * w_j_coef; active = true; } } } } } }; class Wall_Constraint : public Constraint { public: // static const float ground_height=GROUND_HEIGHT; int i; // F=mui Fn = mui mg . 
// static const float // kinematic_friction=30.7*9.81*MS_PER_UPDATE*MS_PER_UPDATE; static const float kinematic_friction = 30000.7 * 9.81 * MS_PER_UPDATE * MS_PER_UPDATE; int wall_idx; float collision_margin; float2 contact_normal; Wall_Constraint(Simulation *sim, int i, int wall_idx) : Constraint(sim, 1) { this->i = i; this->indicies[0] = i; this->wall_idx = wall_idx; this->collision_margin = (sim->particles[i]->r + sim->walls[wall_idx]->width) * 1.05; this->contact_normal = make_float2(0., 0.); } virtual void project(Particle **particles) { delta_X[0].x = 0.0; delta_X[0].y = 0.0; Wall *wall = sim->walls[wall_idx]; float2 p = particles[i]->X_pred; float2 p_prev = particles[i]->X; const float x_hit = (wall->b * (wall->b * p.x - wall->a * p.y) - wall->ac) / wall->ab_sq; // check if between x0,x1 const float chk = (x_hit - wall->x0.x) / wall->t.x; if (chk <= 1 && chk >= 0) { // float d = distance(particles[i]->X, particles[j]->X); // float f = d - collision_margin; const float y_hit = (wall->a * (-wall->b * p.x + wall->a * p.y) - wall->bc) / wall->ab_sq; // const float s=a*p.x+b*p.y+c; const float d = abs(wall->a * p.x + wall->b * p.y + wall->c) / wall->ab_abs; float f = d - collision_margin; if (f < 0) { contact_normal.x = (p.x - x_hit) / d; contact_normal.y = (p.y - y_hit) / d; delta_X[0].x = -contact_normal.x * f; delta_X[0].y = -contact_normal.y * f; active = true; } /* vec2 dir = start - end; float lngth = length(dir); dir /= lngth; vec2 proj = max(0.0, min(lngth, dot((start - p), dir))) * dir; return length( (start - p) - proj ) - (width / 2.0); */ /* const float y_hit=(wall->a*(-wall->b*p.x+wall->a*p.y)-wall->bc)/wall->ab_sq; //const float s=a*p.x+b*p.y+c; //const float dist=abs(s)/ab_abs; //TODO check if ray p_prev-p lies inside object const float c_p=(p_prev.x-x_hit)*wall->n.x+(p_prev.y-y_hit)*wall->n.y; if(c_p<0) { float s=c_p/(wall->n.x*wall->n.x+wall->n.y*wall->n.y); //project (figure out how to add distance later delta_X[0].x=-wall->n.x*s; delta_X[0].y=-wall->n.y*s; active=false; } */ } } }; // TODO class Ground_Constraint : public Constraint { public: // static const float ground_height=GROUND_HEIGHT; int i; // F=mui Fn = mui mg . 
// static const float // kinematic_friction=30.7*9.81*MS_PER_UPDATE*MS_PER_UPDATE; static const float kinematic_friction = 30000.7 * 9.81 * MS_PER_UPDATE * MS_PER_UPDATE; Ground_Constraint(Simulation *sim, int i) : Constraint(sim, 1) { this->i = i; this->indicies[0] = i; } virtual void project(Particle **particles) { // we don't want to use the bad old values delta_X[0].x = 0.0; delta_X[0].y = 0.0; float f = distance_ground(particles[i]->X_pred, particles[i]->r, GROUND_HEIGHT); if (f < 0) { // particles[i]->Delta_x_ctr+=1; //TODO remove float x_d = particles[i]->X_pred.x - particles[i]->X.x; if (x_d > kinematic_friction) // if(particles[i]->V.x>kinematic_friction) { delta_X[0].x = -kinematic_friction; } else { delta_X[0].x = -x_d; } delta_X[0].y = f; active = true; } } }; //---------------------------------------- double GetSeconds() { // Figure out time elapsed since last call to idle function static struct timeval last_idle_time; static double time = 0.0; struct timeval time_now; gettimeofday(&time_now, NULL); if (last_idle_time.tv_usec == 0) last_idle_time = time_now; float dt = (float)(time_now.tv_sec - last_idle_time.tv_sec) + 1.0e-6 * (time_now.tv_usec - last_idle_time.tv_usec); time += dt; last_idle_time = time_now; return time; } static void error_callback(int error, const char *description) { fprintf(stderr, "Error %d: %s\n", error, description); } void set_camera(int display_w, int display_h, double rotate_camera) { glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(36, (double)display_w / (double)display_h, 0.1, 3000); gluLookAt(0, 180, 50, 0, 0, 0, 0, 1, 0); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); } void drawCircle(float cx, float cy, float r, int num_segments, float3 color) { glColor3f((GLfloat)color.x, (GLfloat)color.y, (GLfloat)color.z); glBegin(GL_POLYGON); for (int ii = 0; ii < num_segments; ii++) { float theta = 2.0f * 3.1635926f * float(ii) / float(num_segments); // get the current angle float x = r * cosf(theta); // calculate the x component float y = r * sinf(theta); // calculate the y component glVertex3f(x + cx, 2., y + cy); // output vertex } glEnd(); } void drawDirection(float cx, float cy, float dirx, float diry) { glColor3f((GLfloat)0.0, (GLfloat)0.0, (GLfloat)0.0); glLineWidth(6.0); // default is 1f glBegin(GL_LINES); glVertex3f(cx, 2., cy); glVertex3f(dirx + cx, 2., diry + cy); glEnd(); } void drawGround(float x1, float y1, float x2, float y2) { glColor3f((GLfloat)0.0, (GLfloat)0.0, (GLfloat)0.0); glLineWidth(4.0); // default is 1f glBegin(GL_LINES); glVertex3f(x1, 2., y1); glVertex3f(x2, 2., y2); glEnd(); } void write_config(Simulation *sim) { FILE *fp_out; std::string path = std::string(OUT_PATH) + std::string("config.txt"); fp_out = fopen(path.c_str(), "w"); if (fp_out != NULL) { fprintf(fp_out, "time_step %f\n", sim->time_step); for (int i = 0; i < sim->num_particles; i++) { fprintf( fp_out, "particle_id %d\tradius %f\tgoal %.3f %.3f\tcolor %.3f %.3f %.3f\n", i, sim->particles[i]->r, sim->particles[i]->goal.x, sim->particles[i]->goal.y, sim->particles[i]->color.x, sim->particles[i]->color.y, sim->particles[i]->color.z); } } fclose(fp_out); } void write_to_file(Simulation *sim) { FILE *fp_out; std::string frame = std::to_string(sim->step_no); std::string path = std::string(OUT_PATH) + frame + std::string(".txt"); fp_out = fopen(path.c_str(), "w"); if (fp_out != NULL) { for (int i = 0; i < sim->num_particles; i++) { fprintf(fp_out, "%d\t%.5f\t%.5f\n", i, sim->particles[i]->X.x, sim->particles[i]->X.y); } } fclose(fp_out); } void 
update(Simulation *sim) { write_to_file(sim); sim->do_time_step(); // sim->do_time_step_force(); } void draw_particles(Simulation *sim) { for (int i = 0; i < sim->num_particles; i++) { drawCircle(sim->particles[i]->X.x, sim->particles[i]->X.y, 1, 15, sim->particles[i]->color); /* drawDirection(sim->particles[i]->X.x,sim->particles[i]->X.y, sim->planner->velocity_buffer[i].x,sim->planner->velocity_buffer[i].y); */ } for (int i = 0; i < sim->num_walls; i++) { Wall *w = sim->walls[i]; drawGround(w->x0.x, w->x0.y, w->x1.x, w->x1.y); } drawGround(-1000.0, GROUND_HEIGHT, 1000.0, GROUND_HEIGHT); drawGround(-1000.0, GRID_UP_BOUND, 1000.0, GRID_UP_BOUND); drawGround(LEFT_BOUND_X, GROUND_HEIGHT, LEFT_BOUND_X, GROUND_HEIGHT - 1000); drawGround(RIGHT_BOUND_X, GROUND_HEIGHT, RIGHT_BOUND_X, GROUND_HEIGHT - 1000); } int render(Simulation *sim) { int display_w, display_h; double prev_time = 0.0; double elapsed = 0.0; double cur_time = 0.0; double lag = 0.0; BYTE pixels[3 * WIDTH * HEIGHT]; glfwSetErrorCallback(error_callback); if (!glfwInit()) return 1; GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, "ImGui Crowd Sim", NULL, NULL); glfwMakeContextCurrent(window); glEnable(GL_DEPTH_TEST); // Depth Testing glDepthFunc(GL_LEQUAL); glDisable(GL_CULL_FACE); glCullFace(GL_BACK); glfwGetWindowSize(window, &display_w, &display_h); glViewport(0, 0, display_w, display_h); glm::vec3 const &clear_color = glm::vec3(0.827f, 0.827f, 0.827f); prev_time = GetSeconds(); // Main loop int i = 0; while (!glfwWindowShouldClose(window)) { glLoadIdentity(); cur_time = GetSeconds(); elapsed = cur_time - prev_time; prev_time = cur_time; lag += elapsed; while (lag >= MS_PER_UPDATE) { lag -= MS_PER_UPDATE; update(sim); } glClearColor(clear_color.x, clear_color.y, clear_color.z, 0.0f); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); draw_particles(sim); set_camera(display_w, display_h, 0.0 /*rotate_camera+=0.2*/); glfwSwapBuffers(window); glfwPollEvents(); /* glReadPixels(0,0,display_w,display_h, GL_BGR, GL_UNSIGNED_BYTE, pixels); save_file(pixels,"hello",display_h,display_w); i++; if(i>10) break; */ } glfwTerminate(); return 0; } //----------------------------------------- // rendering float rand_interval(float min, float max) { return min + (max - min) * rand() / RAND_MAX; } float2 rand_float2(float minx, float maxx, float miny, float maxy) { return make_float2(rand_interval(minx, maxx), rand_interval(miny, maxy)); } float deg_to_rad(float deg) { const float PI = 3.1415; while (deg >= 360.) { deg -= 360.; } while (deg <= 0.) { deg += 360.; } return PI / 180. 
* deg; } void dummy_init(Simulation *s) { srand(time(NULL)); int i = 0; float rad = 3.2; float row_init = LEFT_BOUND_X + 3.8 * rad * 50 * 0.5; float height_init = GROUND_HEIGHT - 50; for (int i_ = 0; i_ < ROWS / 2; i_++) { for (int j_ = 0; j_ < COLS; j_++) { float y = height_init - rad * i_ + rand_interval(-0.4, 0.4); float x = row_init + j_ * rad + rand_interval(-0.4, 0.4); s->particles[i] = new Particle(make_float2(x, y), make_float2(0, 0.0), 1.0, 1.0, 0, make_float3(0.0f, 0.0f, 1.0f), make_float2(LEFT_BOUND_X + 2 * rad * 50 * 0.5, y)); i++; } } rad = 3.1; height_init = GROUND_HEIGHT - 50 - 0.11; row_init = LEFT_BOUND_X + 1.4 * rad * 50 * 0.5; for (int i_ = 0; i_ < ROWS / 2; i_++) { for (int j_ = 0; j_ < COLS; j_++) { float y = height_init - rad * i_ + rand_interval(-0.4, 0.4); float x = row_init + j_ * rad + rand_interval(-0.4, 0.4); s->particles[i] = new Particle(make_float2(x, y), make_float2(0, 0.0), 1.0, 1.0, 0, make_float3(1.0f, 0.0f, 0.0f), make_float2(LEFT_BOUND_X + 8 * rad * 50 * 0.5 + x, y)); i++; } } std::vector<particle_tuple *> friction_pairs = get_tuples(s->num_particles); int trig_len = 1 + (s->num_particles * (s->num_particles + 1) / 2); s->stability_upper_trig_arr = (Constraint **)malloc(sizeof(void *) * trig_len); s->collision_upper_trig_arr = (Constraint **)malloc(sizeof(void *) * trig_len); s->powerlaw_upper_trig_arr = (Constraint **)malloc(sizeof(void *) * trig_len); for (std::vector<particle_tuple *>::iterator it = friction_pairs.begin(); it != friction_pairs.end(); ++it) { Stability_Constraint *stab = new Stability_Constraint(s, (*it)->i, (*it)->j); Friction_Constraint *fc = new Friction_Constraint(s, (*it)->i, (*it)->j); Powerlaw_Constraint *pl = new Powerlaw_Constraint(s, (*it)->i, (*it)->j); if ((*it)->i < (*it)->j) { s->collision_map[(*it)->i * s->num_particles + (*it)->j] = fc; int r = (*it)->i; int c = (*it)->j; int t_idx = (s->num_particles * r) + c - (r * (r + 1) * 0.5); s->collision_upper_trig_arr[t_idx] = fc; s->powerlaw_upper_trig_arr[t_idx] = pl; s->stability_upper_trig_arr[t_idx] = stab; } } // set up wall constraints s->num_walls = 2; s->walls = (Wall **)malloc(sizeof(void *) * s->num_walls); s->walls[0] = new Wall(make_float2(-170, GROUND_HEIGHT - 45), make_float2(150., GROUND_HEIGHT - 45), make_float2(0., 1.)); s->walls[1] = new Wall(make_float2(-170, GROUND_HEIGHT - 75), make_float2(150., GROUND_HEIGHT - 75), make_float2(0., -1.)); s->num_constraints = s->num_particles + s->num_particles * s->num_walls; // ground+walls s->constraints = (Constraint **)malloc(sizeof(void *) * s->num_constraints); int constraint_ctr = 0; for (int i = 0; i < s->num_particles; i++) { s->constraints[i] = new Ground_Constraint(s, i); constraint_ctr++; } for (int i = 0; i < s->num_particles; i++) { for (int j = 0; j < s->num_walls; j++) { s->constraints[constraint_ctr] = new Wall_Constraint(s, i, j); constraint_ctr++; } } for (int i = 0; i < s->num_particles; i++) { s->particles[i]->V_pref = V_PREF_ACCEL; float u; do { u = (float)rand() / (float)RAND_MAX; } while (u >= 1.0); s->particles[i]->V_pref += sqrtf(-2.f * logf(1.f - u)) * 0.1f * cosf(2.f * _M_PI * (float)rand() / (float)RAND_MAX); s->planner->calc_pref_v_force(i); s->particles[i]->V.x = s->planner->velocity_buffer[i].x; s->particles[i]->V.y = s->planner->velocity_buffer[i].y; } } int main(int argc, char **argv) { // 0.03 sec - time step, 30 fr/sec int num_particles = ROWS * COLS; Simulation sim(num_particles, 0, MS_PER_UPDATE, "blender.txt"); dummy_init(&sim); write_config(&sim); render(&sim); return 0; }
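Powerlaw_Constraint and the anticipatory branch of Friction_Constraint in the file above estimate a time-to-collision tao from the relative state of two agents: with relative position (x0, y0), relative velocity (v_x, v_y) and combined radius R, a collision requires |x0 + v*t| = R, i.e. a*t^2 - 2*b*t + c = 0 with a = |v|^2, b = -(v . x0), c = |x0|^2 - R^2, d_sq = b^2 - a*c and roots t = (b +/- sqrt(d_sq)) / a — the same a, b, c, d_sq expressions that appear in the code. The sketch below is our own self-contained illustration of that quadratic (not from the dataset files; the function name time_to_collision and the numeric example are ours), and it ignores the code's shrinking of the radius when agents already overlap.

#include <cmath>
#include <cstdio>

// Earliest positive time-to-collision for two discs, or -1 if they never meet.
// Mirrors the a, b, c, d_sq computation in Powerlaw_Constraint::project.
static float time_to_collision(float x0, float y0,   // relative position x_i - x_j
                               float vx, float vy,   // relative velocity v_i - v_j
                               float radius)         // combined radius r_i + r_j
{
    float a = vx * vx + vy * vy;            // |v|^2
    float b = -(vx * x0 + vy * y0);         // -(v . x0), same sign convention
    float c = x0 * x0 + y0 * y0 - radius * radius;
    float d_sq = b * b - a * c;             // discriminant of a*t^2 - 2*b*t + c = 0
    if (d_sq <= 0.f || std::fabs(a) < 1e-5f) return -1.f;
    float d  = std::sqrt(d_sq);
    float t1 = (b - d) / a;
    float t2 = (b + d) / a;
    // keep the smallest strictly positive root, as the constraint code does
    float tao = (t2 < t1 && t2 > 0.f) ? t2 : t1;
    return (tao > 0.f) ? tao : -1.f;
}

int main() {
    // Two unit-radius agents whose centres are 10 m apart, closing head-on at a
    // relative 2 m/s: centres must come within 2 m, so tao = (10 - 2) / 2 = 4 s.
    float tao = time_to_collision(10.f, 0.f, -2.f, 0.f, 2.f);
    std::printf("time to collision: %.2f s (expected 4.00)\n", tao);
    return 0;
}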
401abba8452504349a4d6e47860a51ee7ead4cee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes, project #include <cutil.h> // includes, kernels #include <matrixmul_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; srand(52); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices M = AllocateMatrix(rand() % 1024, rand() % 1024, 1); N = AllocateMatrix(M.width, rand() % 1024, 1); P = AllocateMatrix(M.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; //(int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; cutReadFilei(argv[1], &params, &data_read, true); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } struct timeval t1, t2; double elapsed; gettimeofday(&t1, NULL); // M * N on the device MatrixMulOnDevice(M, N, P); gettimeofday(&t2, NULL); elapsed = ((t2.tv_sec + t2.tv_usec/1000000.0) - (t1.tv_sec + t1.tv_usec/1000000.0)); printf("Elapsed Time: %f\n", elapsed); printf("GPU computation complete\n"); gettimeofday(&t1, NULL); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); gettimeofday(&t2, NULL); elapsed = ((t2.tv_sec + t2.tv_usec/1000000.0) - (t1.tv_sec + t1.tv_usec/1000000.0)); printf("Elapsed Time: %f\n", elapsed); printf("CPU computation complete\n"); // in this case check if the result is equivalent to the expected soluion CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { size_t Msize = M.width * M.height * sizeof(float); size_t Nsize = N.width * N.height * sizeof(float); size_t Psize = P.width * P.height * sizeof(float); // Load M and N to the device float* Md; float* Nd; hipMalloc(&Md, Msize); hipMalloc(&Nd, Nsize); hipMemcpy(Md, M.elements, Msize, hipMemcpyHostToDevice); hipMemcpy(Nd, N.elements, Nsize, hipMemcpyHostToDevice); // Allocate P on the device float* Pd; hipMalloc(&Pd, Psize); // Writing 0s into device memory, possibly removable hipMemcpy(Pd, P.elements, Psize, hipMemcpyHostToDevice); // Setup the execution configuration int width, height; if(P.width%TILE == 0) width = P.width/TILE; else width = P.width/TILE + 1; if(P.height%TILE == 0) height = P.height/TILE; else height = P.height/TILE + 1; dim3 dimGrid(width, height); dim3 dimBlock(TILE, TILE); // Launch the device computation threads! hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, M.height, M.width, N.width); // Read P from the device hipMemcpy(P.elements, Pd, Psize, hipMemcpyDeviceToHost); // Free device matrices hipFree(&Md); hipFree(&Nd); hipFree(&Pd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; cutReadFilef(file_name, &(M->elements), &data_read, true); return (data_read != (M->height * M->width)); } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
401abba8452504349a4d6e47860a51ee7ead4cee.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> // includes, project #include <cutil.h> // includes, kernels #include <matrixmul_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; srand(52); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices M = AllocateMatrix(rand() % 1024, rand() % 1024, 1); N = AllocateMatrix(M.width, rand() % 1024, 1); P = AllocateMatrix(M.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; //(int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; cutReadFilei(argv[1], &params, &data_read, true); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); 
errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } struct timeval t1, t2; double elapsed; gettimeofday(&t1, NULL); // M * N on the device MatrixMulOnDevice(M, N, P); gettimeofday(&t2, NULL); elapsed = ((t2.tv_sec + t2.tv_usec/1000000.0) - (t1.tv_sec + t1.tv_usec/1000000.0)); printf("Elapsed Time: %f\n", elapsed); printf("GPU computation complete\n"); gettimeofday(&t1, NULL); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); gettimeofday(&t2, NULL); elapsed = ((t2.tv_sec + t2.tv_usec/1000000.0) - (t1.tv_sec + t1.tv_usec/1000000.0)); printf("Elapsed Time: %f\n", elapsed); printf("CPU computation complete\n"); // in this case check if the result is equivalent to the expected soluion CUTBoolean res = cutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { size_t Msize = M.width * M.height * sizeof(float); size_t Nsize = N.width * N.height * sizeof(float); size_t Psize = P.width * P.height * sizeof(float); // Load M and N to the device float* Md; float* Nd; cudaMalloc(&Md, Msize); cudaMalloc(&Nd, Nsize); cudaMemcpy(Md, M.elements, Msize, cudaMemcpyHostToDevice); cudaMemcpy(Nd, N.elements, Nsize, cudaMemcpyHostToDevice); // Allocate P on the device float* Pd; cudaMalloc(&Pd, Psize); // Writing 0s into device memory, possibly removable cudaMemcpy(Pd, P.elements, Psize, cudaMemcpyHostToDevice); // Setup the execution configuration int width, height; if(P.width%TILE == 0) width = P.width/TILE; else width = P.width/TILE + 1; if(P.height%TILE == 0) height = P.height/TILE; else height = P.height/TILE + 1; dim3 dimGrid(width, height); dim3 dimBlock(TILE, TILE); // Launch the device computation threads! MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, M.height, M.width, N.width); // Read P from the device cudaMemcpy(P.elements, Pd, Psize, cudaMemcpyDeviceToHost); // Free device matrices cudaFree(&Md); cudaFree(&Nd); cudaFree(&Pd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. 
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; cutReadFilef(file_name, &(M->elements), &data_read, true); return (data_read != (M->height * M->width)); } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
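// Note on the pair above: cudaMalloc/hipMalloc take the address of the pointer so
// they can write the device address into it, but cudaFree/hipFree expect the device
// pointer value itself; MatrixMulOnDevice ends with cudaFree(&Md) etc., while
// FreeDeviceMatrix shows the intended call. A minimal hedged sketch of the
// allocate/copy/free sequence (the demo_* name is illustrative, not from the file):
#include <cuda_runtime.h>

int demo_alloc_copy_free(const float* h_src, size_t n) {
    float* d_buf = NULL;
    // cudaMalloc needs &d_buf so it can store the new device address into d_buf...
    if (cudaMalloc((void**)&d_buf, n * sizeof(float)) != cudaSuccess) return -1;
    cudaMemcpy(d_buf, h_src, n * sizeof(float), cudaMemcpyHostToDevice);
    // ...but cudaFree takes the device pointer value, not its address.
    cudaFree(d_buf);
    return 0;
}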
dcd56564472dc83425c8d77f0a8bcc7d66c48a67.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "ContinuousConvTransposeBackpropFilterOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TReal, class TIndex> class ContinuousConvTransposeBackpropFilterOpKernelCUDA : public ContinuousConvTransposeBackpropFilterOpKernel<TIndex> { public: explicit ContinuousConvTransposeBackpropFilterOpKernelCUDA( OpKernelConstruction* construction) : ContinuousConvTransposeBackpropFilterOpKernel<TIndex>(construction) { texture_alignment = GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& out_positions, const tensorflow::Tensor& out_importance, const tensorflow::Tensor& extents, const tensorflow::Tensor& offset, const tensorflow::Tensor& inp_positions, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_neighbors_importance_sum, const tensorflow::Tensor& inp_neighbors_row_splits, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const tensorflow::Tensor& out_features_gradient, const std::vector<int>& filter_dims, const bool individual_extents, const bool isotropic_extents, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& filter_backprop) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? 
out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); temp_size = ::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, realtype, indextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DContinuousConvTransposeBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<realtype>("TReal") \ .TypeConstraint<indextype>("TIndex"), \ ContinuousConvTransposeBackpropFilterOpKernelCUDA< \ feattype, outtype, realtype, indextype>); REG_KB(float, float, float, int32) #undef REG_KB
dcd56564472dc83425c8d77f0a8bcc7d66c48a67.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "ContinuousConvTransposeBackpropFilterOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TReal, class TIndex> class ContinuousConvTransposeBackpropFilterOpKernelCUDA : public ContinuousConvTransposeBackpropFilterOpKernel<TIndex> { public: explicit ContinuousConvTransposeBackpropFilterOpKernelCUDA( OpKernelConstruction* construction) : ContinuousConvTransposeBackpropFilterOpKernel<TIndex>(construction) { texture_alignment = GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& out_positions, const tensorflow::Tensor& out_importance, const tensorflow::Tensor& extents, const tensorflow::Tensor& offset, const tensorflow::Tensor& inp_positions, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_neighbors_importance_sum, const tensorflow::Tensor& inp_neighbors_row_splits, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const tensorflow::Tensor& out_features_gradient, const std::vector<int>& filter_dims, const bool individual_extents, const bool isotropic_extents, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& filter_backprop) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? 
inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); temp_size = std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, filter_backprop.flat<TOut>().data(), filter_dims, out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), out_features_gradient.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, realtype, indextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DContinuousConvTransposeBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<realtype>("TReal") \ .TypeConstraint<indextype>("TIndex"), \ ContinuousConvTransposeBackpropFilterOpKernelCUDA< \ feattype, outtype, realtype, indextype>); REG_KB(float, float, float, int32) #undef REG_KB
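// The Kernel() method above calls CConvTransposeBackpropFilterCUDA twice: first with
// temp_ptr == nullptr so the implementation only reports temp_size/max_temp_size,
// then again with real scratch memory (clamped by max_temp_mem_MB) to do the work.
// A hedged sketch of that query-then-allocate pattern with a hypothetical
// demo_run_op(); only the two-pass structure mirrors the operator code above.
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical op: with scratch == nullptr it only fills in the size requirements.
static void demo_run_op(void* scratch, size_t& temp_size, size_t& max_temp_size) {
    if (scratch == nullptr) { temp_size = 1024; max_temp_size = 4096; return; }
    // ...the real operation would use `scratch` here...
}

static void demo_query_then_run(size_t max_temp_mem_mb) {
    size_t temp_size = 0, max_temp_size = 0;
    demo_run_op(nullptr, temp_size, max_temp_size);              // pass 1: sizing only
    temp_size = std::max(std::min(max_temp_mem_mb * 1024 * 1024, max_temp_size),
                         temp_size);                             // respect the memory cap
    std::vector<uint8_t> scratch(temp_size);                     // the op allocates a temp Tensor instead
    demo_run_op(scratch.data(), temp_size, max_temp_size);       // pass 2: real work
}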
68f4685ca307915dfd7a46ae57c69e46f8695b3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) const size_t DSIZE = 16384; // matrix side dimension const int block_size = 256; // CUDA maximum is 1024 // matrix row-sum kernel __global__ void row_sums(const float *A, float *sums, size_t ds){ int idx = threadIdx.x + blockIdx.x*blockDim.x; // create typical 1D thread index from built-in variables if (idx < ds){ float sum = 0.0f; for (size_t i = 0; i < ds; i++) sum += A[idx*ds+i]; // write a for loop that will cause the thread to iterate across a row, keeeping a running sum, and write the result to sums sums[idx] = sum; }} // matrix column-sum kernel __global__ void column_sums(const float *A, float *sums, size_t ds){ int idx = threadIdx.x + blockIdx.x*blockDim.x; // create typical 1D thread index from built-in variables if (idx < ds){ float sum = 0.0f; for (size_t i = 0; i < ds; i++) sum += A[idx*ds+i]; // write a for loop that will cause the thread to iterate down a column, keeeping a running sum, and write the result to sums sums[idx] = sum; }} bool validate(float *data, size_t sz){ for (size_t i = 0; i < sz; i++) if (data[i] != (float)sz) {printf("results mismatch at %lu, was: %f, should be: %f\n", i, data[i], (float)sz); return false;} return true; } int main(){ float *h_A, *h_sums, *d_A, *d_sums; h_A = new float[DSIZE*DSIZE]; // allocate space for data in host memory h_sums = new float[DSIZE](); for (int i = 0; i < DSIZE*DSIZE; i++) // initialize matrix in host memory h_A[i] = 1.0f; hipMalloc(&d_A, DSIZE*DSIZE*sizeof(float)); // allocate device space for A hipMalloc(&d_sums, DSIZE*sizeof(float)); // allocate device space for vector d_sums cudaCheckErrors("hipMalloc failure"); // error checking // copy matrix A to device: hipMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D failure"); //cuda processing sequence step 1 is complete hipLaunchKernelGGL(( row_sums), dim3((DSIZE+block_size-1)/block_size), dim3(block_size), 0, 0, d_A, d_sums, DSIZE); cudaCheckErrors("kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure"); if (!validate(h_sums, DSIZE)) return -1; printf("row sums correct!\n"); hipMemset(d_sums, 0, DSIZE*sizeof(float)); hipLaunchKernelGGL(( column_sums), dim3((DSIZE+block_size-1)/block_size), dim3(block_size), 0, 0, d_A, d_sums, DSIZE); cudaCheckErrors("kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: hipMemcpy(h_sums, d_sums, DSIZE*sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure"); if (!validate(h_sums, DSIZE)) return -1; printf("column sums correct!\n"); return 0; }
68f4685ca307915dfd7a46ae57c69e46f8695b3b.cu
#include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) const size_t DSIZE = 16384; // matrix side dimension const int block_size = 256; // CUDA maximum is 1024 // matrix row-sum kernel __global__ void row_sums(const float *A, float *sums, size_t ds){ int idx = threadIdx.x + blockIdx.x*blockDim.x; // create typical 1D thread index from built-in variables if (idx < ds){ float sum = 0.0f; for (size_t i = 0; i < ds; i++) sum += A[idx*ds+i]; // write a for loop that will cause the thread to iterate across a row, keeeping a running sum, and write the result to sums sums[idx] = sum; }} // matrix column-sum kernel __global__ void column_sums(const float *A, float *sums, size_t ds){ int idx = threadIdx.x + blockIdx.x*blockDim.x; // create typical 1D thread index from built-in variables if (idx < ds){ float sum = 0.0f; for (size_t i = 0; i < ds; i++) sum += A[idx*ds+i]; // write a for loop that will cause the thread to iterate down a column, keeeping a running sum, and write the result to sums sums[idx] = sum; }} bool validate(float *data, size_t sz){ for (size_t i = 0; i < sz; i++) if (data[i] != (float)sz) {printf("results mismatch at %lu, was: %f, should be: %f\n", i, data[i], (float)sz); return false;} return true; } int main(){ float *h_A, *h_sums, *d_A, *d_sums; h_A = new float[DSIZE*DSIZE]; // allocate space for data in host memory h_sums = new float[DSIZE](); for (int i = 0; i < DSIZE*DSIZE; i++) // initialize matrix in host memory h_A[i] = 1.0f; cudaMalloc(&d_A, DSIZE*DSIZE*sizeof(float)); // allocate device space for A cudaMalloc(&d_sums, DSIZE*sizeof(float)); // allocate device space for vector d_sums cudaCheckErrors("cudaMalloc failure"); // error checking // copy matrix A to device: cudaMemcpy(d_A, h_A, DSIZE*DSIZE*sizeof(float), cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D failure"); //cuda processing sequence step 1 is complete row_sums<<<(DSIZE+block_size-1)/block_size, block_size>>>(d_A, d_sums, DSIZE); cudaCheckErrors("kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure"); if (!validate(h_sums, DSIZE)) return -1; printf("row sums correct!\n"); cudaMemset(d_sums, 0, DSIZE*sizeof(float)); column_sums<<<(DSIZE+block_size-1)/block_size, block_size>>>(d_A, d_sums, DSIZE); cudaCheckErrors("kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sums, d_sums, DSIZE*sizeof(float), cudaMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure"); if (!validate(h_sums, DSIZE)) return -1; printf("column sums correct!\n"); return 0; }
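// In the pair above the matrix is stored row-major, so element (r, c) lives at
// A[r*ds + c]. The column_sums kernel walks A[idx*ds + i], which is row `idx` again;
// because the test matrix is square and filled with 1.0f the validation still passes.
// A hedged sketch of a kernel that actually walks down column `idx`:
__global__ void column_sums_strided(const float *A, float *sums, size_t ds) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;   // one thread per column
    if (idx < ds) {
        float sum = 0.0f;
        for (size_t i = 0; i < ds; i++)
            sum += A[i * ds + idx];                    // stride by the row length
        sums[idx] = sum;
    }
}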
755ba2b71e88729fb78b604f888a7206e980ac78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define FALSE 0 #define TRUE !FALSE #define NUMTHREADS 16 #define THREADWORK 32 __global__ void gpuPMCCNoTest(const float * vectsa, size_t na, const float * vectsb, size_t nb, size_t dim, const float * numPairs, const float * means, const float * sds, float * correlations) { size_t offset, stride, x = blockIdx.x, y = blockIdx.y, tx = threadIdx.x; float a, b, n, scoreA, scoreB; __shared__ float meanA, meanB, sdA, sdB, threadSums[NUMTHREADS]; if((x >= na) || (y >= nb)) return; if(tx == 0) { meanA = means[x*nb*2+y*2]; meanB = means[x*nb*2+y*2+1]; sdA = sds[x*nb*2+y*2]; sdB = sds[x*nb*2+y*2+1]; n = numPairs[x*nb+y]; } __syncthreads(); threadSums[tx] = 0.f; for(offset = tx; offset < dim; offset += NUMTHREADS) { a = vectsa[x * dim + offset]; b = vectsb[y * dim + offset]; scoreA = (a - meanA) / sdA; scoreB = (b - meanB) / sdB; threadSums[tx] += scoreA * scoreB; } __syncthreads(); for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) { if(tx < stride) threadSums[tx] += threadSums[tx + stride]; __syncthreads(); } if(tx == 0) correlations[x*nb+y] = threadSums[0] / (n - 1.f); }
755ba2b71e88729fb78b604f888a7206e980ac78.cu
#include "includes.h" #define FALSE 0 #define TRUE !FALSE #define NUMTHREADS 16 #define THREADWORK 32 __global__ void gpuPMCCNoTest(const float * vectsa, size_t na, const float * vectsb, size_t nb, size_t dim, const float * numPairs, const float * means, const float * sds, float * correlations) { size_t offset, stride, x = blockIdx.x, y = blockIdx.y, tx = threadIdx.x; float a, b, n, scoreA, scoreB; __shared__ float meanA, meanB, sdA, sdB, threadSums[NUMTHREADS]; if((x >= na) || (y >= nb)) return; if(tx == 0) { meanA = means[x*nb*2+y*2]; meanB = means[x*nb*2+y*2+1]; sdA = sds[x*nb*2+y*2]; sdB = sds[x*nb*2+y*2+1]; n = numPairs[x*nb+y]; } __syncthreads(); threadSums[tx] = 0.f; for(offset = tx; offset < dim; offset += NUMTHREADS) { a = vectsa[x * dim + offset]; b = vectsb[y * dim + offset]; scoreA = (a - meanA) / sdA; scoreB = (b - meanB) / sdB; threadSums[tx] += scoreA * scoreB; } __syncthreads(); for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) { if(tx < stride) threadSums[tx] += threadSums[tx + stride]; __syncthreads(); } if(tx == 0) correlations[x*nb+y] = threadSums[0] / (n - 1.f); }
3820c16cac6dbafdc032a270d4a64abc997ad1bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : CUDA_MatrixMul.cu Author : Liuzzo Mauro Version : Copyright : Description : CUDA multiply matrices ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #define TILE_WIDTH 16 static void CheckCudaErrorAux(const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement << " returned " << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; exit(1); } float *generate_random_data(int length) { float *data = (float *)malloc(sizeof(float) * length); for (int i = 0; i < length; i++) { data[i] = ((float)(rand() % 20) - 5) / 5.0f; } return data; } float *generate_fixed_data(int length) { float *data = (float *)malloc(sizeof(float) * length); int value = 1; for (int i = 0; i < length; i++) { data[i] = value++; } return data; } // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns) { // matrix portions to be loaded into the shared memory __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; // thread coordinates computation int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // some useful variables int tx = threadIdx.x; int ty = threadIdx.y; // intermediate value for the calculation of the product float Pvalue = 0; for (int ph = 0; ph < (numAColumns-1) / TILE_WIDTH + 1; ph++) { // verify if the thread has some data to load if (row < numARows && ph * TILE_WIDTH + tx < numAColumns) { Mds[ty][tx] = A[row * numAColumns + ph * TILE_WIDTH + tx]; } else { Mds[ty][tx] = 0.0; } if (col < numBColumns && ph * TILE_WIDTH + ty < numBRows) { Nds[ty][tx] = B[(ph * TILE_WIDTH + threadIdx.y) * numBColumns + col]; } else { Nds[ty][tx] = 0.0; } __syncthreads(); for (int i = 0; i < TILE_WIDTH; i++) { Pvalue += Mds[ty][i] * Nds[i][tx]; } __syncthreads(); } // verify if the thread has some data to write, based on output organization if (row < numARows && col < numBColumns){ C[row*numBColumns + col] = Pvalue; } } void print_matrix(float* matrix, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { printf("%f ", matrix[i * cols + j]); } printf("\n"); } printf("\n"); } int main() { float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows = 2; // number of rows in the matrix A int numAColumns = 3; // number of columns in the matrix A int numBRows = 3; // number of rows in the matrix B int numBColumns = 4; // number of columns in the matrix B int numCRows = numARows; int numCColumns = numBColumns; // allocate host memory hostA = generate_fixed_data(numARows * numAColumns); hostB = generate_fixed_data(numBRows * numBColumns); hostC = (float*)malloc(numCRows * numCColumns * sizeof(float)); // allocate device memory CUDA_CHECK_RETURN( hipMalloc((void **)&deviceA, sizeof(float) * numARows * 
numAColumns)); CUDA_CHECK_RETURN( hipMalloc((void **)&deviceB, sizeof(float) * numBRows * numBColumns)); CUDA_CHECK_RETURN( hipMalloc((void **)&deviceC, sizeof(float) * numCRows * numCColumns)); // copy from host to device memory CUDA_CHECK_RETURN( hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK_RETURN( hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice)); // organize grid dim3 blockDim(16, 16); dim3 gridDim(ceil(((float)numAColumns) / blockDim.x), ceil(((float)numBRows) / blockDim.y)); // execute kernel matrixMultiply << <gridDim, blockDim >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns); hipDeviceSynchronize(); // copy results from device memory to host CUDA_CHECK_RETURN( hipMemcpy(hostC, deviceC, numARows * numBColumns * sizeof(float), hipMemcpyDeviceToHost)); print_matrix(hostA, numARows, numAColumns); print_matrix(hostB, numBRows, numBColumns); print_matrix(hostC, numCRows, numCColumns); // Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); // Free host memory free(hostA); free(hostB); free(hostC); }
3820c16cac6dbafdc032a270d4a64abc997ad1bb.cu
/* ============================================================================ Name : CUDA_MatrixMul.cu Author : Liuzzo Mauro Version : Copyright : Description : CUDA multiply matrices ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #define TILE_WIDTH 16 static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; exit(1); } float *generate_random_data(int length) { float *data = (float *)malloc(sizeof(float) * length); for (int i = 0; i < length; i++) { data[i] = ((float)(rand() % 20) - 5) / 5.0f; } return data; } float *generate_fixed_data(int length) { float *data = (float *)malloc(sizeof(float) * length); int value = 1; for (int i = 0; i < length; i++) { data[i] = value++; } return data; } // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns) { // matrix portions to be loaded into the shared memory __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; // thread coordinates computation int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // some useful variables int tx = threadIdx.x; int ty = threadIdx.y; // intermediate value for the calculation of the product float Pvalue = 0; for (int ph = 0; ph < (numAColumns-1) / TILE_WIDTH + 1; ph++) { // verify if the thread has some data to load if (row < numARows && ph * TILE_WIDTH + tx < numAColumns) { Mds[ty][tx] = A[row * numAColumns + ph * TILE_WIDTH + tx]; } else { Mds[ty][tx] = 0.0; } if (col < numBColumns && ph * TILE_WIDTH + ty < numBRows) { Nds[ty][tx] = B[(ph * TILE_WIDTH + threadIdx.y) * numBColumns + col]; } else { Nds[ty][tx] = 0.0; } __syncthreads(); for (int i = 0; i < TILE_WIDTH; i++) { Pvalue += Mds[ty][i] * Nds[i][tx]; } __syncthreads(); } // verify if the thread has some data to write, based on output organization if (row < numARows && col < numBColumns){ C[row*numBColumns + col] = Pvalue; } } void print_matrix(float* matrix, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { printf("%f ", matrix[i * cols + j]); } printf("\n"); } printf("\n"); } int main() { float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows = 2; // number of rows in the matrix A int numAColumns = 3; // number of columns in the matrix A int numBRows = 3; // number of rows in the matrix B int numBColumns = 4; // number of columns in the matrix B int numCRows = numARows; int numCColumns = numBColumns; // allocate host memory hostA = generate_fixed_data(numARows * numAColumns); hostB = generate_fixed_data(numBRows * numBColumns); hostC = (float*)malloc(numCRows * numCColumns * sizeof(float)); // allocate device memory CUDA_CHECK_RETURN( cudaMalloc((void **)&deviceA, sizeof(float) * numARows * numAColumns)); CUDA_CHECK_RETURN( cudaMalloc((void **)&deviceB, sizeof(float) * numBRows * 
numBColumns)); CUDA_CHECK_RETURN( cudaMalloc((void **)&deviceC, sizeof(float) * numCRows * numCColumns)); // copy from host to device memory CUDA_CHECK_RETURN( cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN( cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice)); // organize grid dim3 blockDim(16, 16); dim3 gridDim(ceil(((float)numAColumns) / blockDim.x), ceil(((float)numBRows) / blockDim.y)); // execute kernel matrixMultiply << <gridDim, blockDim >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns); cudaDeviceSynchronize(); // copy results from device memory to host CUDA_CHECK_RETURN( cudaMemcpy(hostC, deviceC, numARows * numBColumns * sizeof(float), cudaMemcpyDeviceToHost)); print_matrix(hostA, numARows, numAColumns); print_matrix(hostB, numBRows, numBColumns); print_matrix(hostC, numCRows, numCColumns); // Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); // Free host memory free(hostA); free(hostB); free(hostC); }
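// In main() above the launch grid is sized from numAColumns and numBRows; with the
// 2x3 * 3x4 test matrices both expressions round up to a 1x1 grid, so the result is
// still correct here. For general shapes the grid has to tile the output matrix C
// (numCColumns across, numCRows down). A hedged sketch of that sizing:
static dim3 grid_for_output(int numCRows, int numCColumns, dim3 block) {
    // one block per TILE_WIDTH x TILE_WIDTH tile of C, rounding up on both axes
    return dim3((numCColumns + block.x - 1) / block.x,
                (numCRows + block.y - 1) / block.y);
}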
fb2e0bd28051da7ee31aa9dd6eb367116271ed07.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string> #include <cstring> #include <cstdlib> #include <iostream> #include <stdio.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <ctime> using namespace std; #define size 1024 __global__ void mykernel(int *transbit, int *pattern, int * result, int pl,int tn, int tp, int *bits) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int i= tid*pl; int support=0; if (i < tp) { const int bitslength = tn; for (int i9 = 0; i9 < tn; i9++) { for (int i2 = 0; i2 < pl; i2++) { int pi = pattern[i+i2]; bits[i9] *= transbit[pi*tn+i9]; } } for(int i2=0;i2 <tn;i2++) { support=support+bits[i2]; } } result[tid]=support; } int main() { const int Ntbits=1000; int transbit[Ntbits] = {1,1,1,0,1,0,1,1,1,0,1,1,1,0,1,1,0,0,1,0,1,1,1,0,0,0,1,0,1,1,1,1,0,1,0,1,1,0,0,0,0,1,0,1,0,0,1,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,1,0,1,1,1,1,0,1,0,0,0,1,1,0,1,0,1,0,0,1,1,0,1,1,1,1,0,0,0,1,1,0,1,0,1,1,1,1,1,1,1,0,1,1,0,1,0,0,0,1,1,1,0,0,0,1,0,0,1,1,0,0,0,1,0,1,0,0,1,0,1,0,1,1,0,0,0,0,1,0,0,0,1,1,1,0,0,0,0,0,1,0,1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,0,1,0,0,0,1,0,0,0,1,1,0,1,1,1,1,1,0,0,1,1,0,1,0,1,1,1,0,1,0,0,0,1,1,0,0,1,1,0,0,0,1,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,0,0,1,1,0,1,0,0,1,1,1,1,0,1,1,1,0,0,0,0,1,1,0,0,0,1,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,1,0,0,0,0,0,0,1,0,1,1,1,1,0,0,1,1,0,0,1,0,1,1,1,0,1,1,0,1,0,1,0,1,1,1,0,1,0,0,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,0,1,1,0,0,1,1,0,0,0,1,1,0,1,0,1,1,0,0,1,1,1,1,0,0,1,0,0,0,1,0,1,1,0,1,0,0,0,1,0,0,0,1,1,1,1,1,0,1,0,0,1,1,0,1,0,0,0,1,1,0,1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,0,1,0,1,0,1,1,0,0,0,1,1,1,1,0,1,0,1,1,0,1,0,0,0,1,0,0,1,0,0,1,0,1,1,1,0,0,1,1,1,1,1,1,0,1,1,0,0,1,1,0,1,0,0,0,0,0,1,1,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,0,0,1,0,1,1,0,0,1,0,0,1,0,1,0,1,0,1,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,0,1,0,0,0,0,1,1,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,1,1,1,0,1,0,0,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,1,0,1,1,1,1,0,0,0,0,1,1,0,0,1,0,1,0,1,0,1,0,1,1,1,1,1,1,0,0,1,0,1,1,1,0,1,1,1,0,1,1,1,1,0,0,0,0,0,1,0,1,1,1,1,0,0,0,1,0,0,1,0,0,1,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,0,1,1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,1,0,0,1,1,0,0,0,0,0,0,1,0,1,1,1,1,0,1,0,1,1,0,0,1,0,1,0,0,0,0,0,1,1,1,1,0,1,1,0,1,1,1,1,0,1,0,0,0,1,0,0,0,1,1,1,1,0,1,0,0,1,0,1,0,1,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,1,1,1,0,1,1,1,0,1,0,0,1,1,1,1,0,0,1,0,1,1,0,1,1,0,1,1,1,0,1,0,1,1,1,1,0,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,1,1,0,1,1,0,0,1,0,0,0,1,1,0,0,1,1,1,0,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,1,1,1,0,0,0,1,0,0,0,1,1,1,0,1,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,1,1,0,0,0,1,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,1,0,0,0,1,1,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,1,0,0,1,1,1,1,0,1,0,1,1,0,0,0,0,1,0,1,0,0,1,0,0,0,1,0,0,0,1,1,0,0,0,1,1}; const int transNo =100; const int patternLength = 3; int totalpatrn = 2; int bits[transNo] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}; const int Nofpatrn = totalpatrn*patternLength; int pattern[Nofpatrn] = { 0,1,3, 1,2,3 }; int results[Nofpatrn] = {}; int *d_transbit; int *d_pattern; int *d_result; int *d_bits; hipMalloc(&d_transbit, sizeof(int)*Ntbits); hipMalloc(&d_pattern, sizeof(int) * Nofpatrn); hipMalloc(&d_result, sizeof(int) * Nofpatrn); hipMalloc(&d_bits, sizeof(int) * transNo); hipMemcpy(d_transbit, transbit, sizeof(int) * Ntbits, hipMemcpyHostToDevice); hipMemcpy(d_pattern, pattern, 
sizeof(int) * Nofpatrn, hipMemcpyHostToDevice); hipMemcpy(d_bits, bits, sizeof(int) * transNo, hipMemcpyHostToDevice); hipLaunchKernelGGL(( mykernel), dim3(1), dim3(totalpatrn), 0, 0, d_transbit, d_pattern, d_result, patternLength,transNo,Nofpatrn,d_bits); hipDeviceSynchronize(); hipMemcpy(results, d_result, sizeof(int) * Nofpatrn, hipMemcpyDeviceToHost); printf("GPU result\n"); for (int i2 = 0; i2 < transNo; i2++) { printf("%d", results[i2]); } printf("\n"); clock_t begin = clock(); for (int i = 0; i < 2; i++)// till pattern length { for (int i2 = 0; i2 < patternLength; i2++) { int point = 0; for (int i9 = 0; i9 < transNo; i9++) { bits[i9] = bits[i9] * transbit[point + (pattern[(i+1)*i2] * transNo)]; point++; } printf("\n"); } } clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("%lf", elapsed_secs); hipFree(d_transbit); hipFree(d_pattern); hipFree(d_result); return 0; }
fb2e0bd28051da7ee31aa9dd6eb367116271ed07.cu
#include <stdio.h> #include <string> #include <cstring> #include <cstdlib> #include <iostream> #include <stdio.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <ctime> using namespace std; #define size 1024 __global__ void mykernel(int *transbit, int *pattern, int * result, int pl,int tn, int tp, int *bits) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int i= tid*pl; int support=0; if (i < tp) { const int bitslength = tn; for (int i9 = 0; i9 < tn; i9++) { for (int i2 = 0; i2 < pl; i2++) { int pi = pattern[i+i2]; bits[i9] *= transbit[pi*tn+i9]; } } for(int i2=0;i2 <tn;i2++) { support=support+bits[i2]; } } result[tid]=support; } int main() { const int Ntbits=1000; int transbit[Ntbits] = {1,1,1,0,1,0,1,1,1,0,1,1,1,0,1,1,0,0,1,0,1,1,1,0,0,0,1,0,1,1,1,1,0,1,0,1,1,0,0,0,0,1,0,1,0,0,1,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,1,0,1,1,1,1,0,1,0,0,0,1,1,0,1,0,1,0,0,1,1,0,1,1,1,1,0,0,0,1,1,0,1,0,1,1,1,1,1,1,1,0,1,1,0,1,0,0,0,1,1,1,0,0,0,1,0,0,1,1,0,0,0,1,0,1,0,0,1,0,1,0,1,1,0,0,0,0,1,0,0,0,1,1,1,0,0,0,0,0,1,0,1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,0,1,0,0,0,1,0,0,0,1,1,0,1,1,1,1,1,0,0,1,1,0,1,0,1,1,1,0,1,0,0,0,1,1,0,0,1,1,0,0,0,1,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,0,0,1,1,0,1,0,0,1,1,1,1,0,1,1,1,0,0,0,0,1,1,0,0,0,1,1,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,1,0,0,0,0,0,0,1,0,1,1,1,1,0,0,1,1,0,0,1,0,1,1,1,0,1,1,0,1,0,1,0,1,1,1,0,1,0,0,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,0,1,1,0,0,1,1,0,0,0,1,1,0,1,0,1,1,0,0,1,1,1,1,0,0,1,0,0,0,1,0,1,1,0,1,0,0,0,1,0,0,0,1,1,1,1,1,0,1,0,0,1,1,0,1,0,0,0,1,1,0,1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,0,1,0,1,0,1,1,0,0,0,1,1,1,1,0,1,0,1,1,0,1,0,0,0,1,0,0,1,0,0,1,0,1,1,1,0,0,1,1,1,1,1,1,0,1,1,0,0,1,1,0,1,0,0,0,0,0,1,1,1,0,1,0,1,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,0,0,1,0,1,1,0,0,1,0,0,1,0,1,0,1,0,1,1,1,0,0,0,0,1,0,0,0,0,1,1,1,1,1,1,0,1,0,0,0,0,1,1,1,0,0,1,0,0,0,0,1,1,0,1,0,1,0,1,1,1,0,1,0,0,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,1,0,1,0,1,1,1,1,0,0,0,0,1,1,0,0,1,0,1,0,1,0,1,0,1,1,1,1,1,1,0,0,1,0,1,1,1,0,1,1,1,0,1,1,1,1,0,0,0,0,0,1,0,1,1,1,1,0,0,0,1,0,0,1,0,0,1,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,0,1,1,1,1,0,0,0,0,1,0,1,0,0,1,1,0,1,0,0,1,1,0,0,0,0,0,0,1,0,1,1,1,1,0,1,0,1,1,0,0,1,0,1,0,0,0,0,0,1,1,1,1,0,1,1,0,1,1,1,1,0,1,0,0,0,1,0,0,0,1,1,1,1,0,1,0,0,1,0,1,0,1,0,0,1,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,1,1,1,0,1,1,1,0,1,0,0,1,1,1,1,0,0,1,0,1,1,0,1,1,0,1,1,1,0,1,0,1,1,1,1,0,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,1,1,0,1,1,0,0,1,0,0,0,1,1,0,0,1,1,1,0,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,1,1,1,0,0,0,1,0,0,0,1,1,1,0,1,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,1,1,0,0,0,1,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,1,0,0,0,1,1,0,1,0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,1,1,1,0,1,0,1,1,1,0,0,0,0,0,0,1,0,0,1,1,1,1,0,1,0,1,1,0,0,0,0,1,0,1,0,0,1,0,0,0,1,0,0,0,1,1,0,0,0,1,1}; const int transNo =100; const int patternLength = 3; int totalpatrn = 2; int bits[transNo] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}; const int Nofpatrn = totalpatrn*patternLength; int pattern[Nofpatrn] = { 0,1,3, 1,2,3 }; int results[Nofpatrn] = {}; int *d_transbit; int *d_pattern; int *d_result; int *d_bits; cudaMalloc(&d_transbit, sizeof(int)*Ntbits); cudaMalloc(&d_pattern, sizeof(int) * Nofpatrn); cudaMalloc(&d_result, sizeof(int) * Nofpatrn); cudaMalloc(&d_bits, sizeof(int) * transNo); cudaMemcpy(d_transbit, transbit, sizeof(int) * Ntbits, cudaMemcpyHostToDevice); cudaMemcpy(d_pattern, pattern, sizeof(int) * Nofpatrn, cudaMemcpyHostToDevice); 
cudaMemcpy(d_bits, bits, sizeof(int) * transNo, cudaMemcpyHostToDevice); mykernel<<<1, totalpatrn>>>(d_transbit, d_pattern, d_result, patternLength,transNo,Nofpatrn,d_bits); cudaDeviceSynchronize(); cudaMemcpy(results, d_result, sizeof(int) * Nofpatrn, cudaMemcpyDeviceToHost); printf("GPU result\n"); for (int i2 = 0; i2 < transNo; i2++) { printf("%d", results[i2]); } printf("\n"); clock_t begin = clock(); for (int i = 0; i < 2; i++)// till pattern length { for (int i2 = 0; i2 < patternLength; i2++) { int point = 0; for (int i9 = 0; i9 < transNo; i9++) { bits[i9] = bits[i9] * transbit[point + (pattern[(i+1)*i2] * transNo)]; point++; } printf("\n"); } } clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; printf("%lf", elapsed_secs); cudaFree(d_transbit); cudaFree(d_pattern); cudaFree(d_result); return 0; }
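// In the pair above all threads multiply into the same d_bits buffer, so with
// totalpatrn > 1 the per-pattern supports interfere, and the host print loop walks
// results[0..transNo) although only Nofpatrn entries exist. A hedged sketch of a
// kernel that keeps the per-transaction product in a register, so each pattern's
// support is computed independently:
__global__ void pattern_support(const int *transbit, const int *pattern, int *result,
                                int pl, int tn, int npatterns) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per pattern
    if (tid >= npatterns) return;
    int support = 0;
    for (int t = 0; t < tn; t++) {                     // every transaction
        int all_present = 1;
        for (int k = 0; k < pl; k++) {                 // every item of this pattern
            int item = pattern[tid * pl + k];
            all_present *= transbit[item * tn + t];    // presence bits are 0/1
        }
        support += all_present;
    }
    result[tid] = support;
}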
baedbca5d39e83679c0dae4d60edc991c76f1462.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // include files // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "../common/helper_cuda.h" // // kernel routine // __global__ void my_first_kernel(float *x) { int tid = threadIdx.x + blockDim.x*blockIdx.x; x[tid] = (float) threadIdx.x; } // // main code // int main(int argc, const char **argv) { float *h_x, *d_x; int nblocks, nthreads, nsize, n; // initialise card findCudaDevice(argc, argv); // set number of blocks, and threads per block nblocks = 2; nthreads = 8; nsize = nblocks*nthreads ; // allocate memory for array h_x = (float *)malloc(nsize*sizeof(float)); checkCudaErrors(hipMalloc((void **)&d_x, nsize*sizeof(float))); // execute kernel hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x); getLastCudaError("my_first_kernel execution failed\n"); // copy back results and print them out checkCudaErrors( hipMemcpy(h_x,d_x,nsize*sizeof(float), hipMemcpyDeviceToHost) ); for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]); // free memory checkCudaErrors(hipFree(d_x)); free(h_x); // CUDA exit -- needed to flush printf write buffer hipDeviceReset(); return 0; }
baedbca5d39e83679c0dae4d60edc991c76f1462.cu
//
// include files
//

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "../common/helper_cuda.h"

//
// kernel routine
//

__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;

  x[tid] = (float) threadIdx.x;
}

//
// main code
//

int main(int argc, const char **argv)
{
  float *h_x, *d_x;
  int   nblocks, nthreads, nsize, n;

  // initialise card

  findCudaDevice(argc, argv);

  // set number of blocks, and threads per block

  nblocks  = 2;
  nthreads = 8;
  nsize    = nblocks*nthreads ;

  // allocate memory for array

  h_x = (float *)malloc(nsize*sizeof(float));
  checkCudaErrors(cudaMalloc((void **)&d_x, nsize*sizeof(float)));

  // execute kernel

  my_first_kernel<<<nblocks,nthreads>>>(d_x);
  getLastCudaError("my_first_kernel execution failed\n");

  // copy back results and print them out

  checkCudaErrors( cudaMemcpy(h_x,d_x,nsize*sizeof(float), cudaMemcpyDeviceToHost) );

  for (n=0; n<nsize; n++) printf(" n,  x  =  %d  %f \n",n,h_x[n]);

  // free memory

  checkCudaErrors(cudaFree(d_x));
  free(h_x);

  // CUDA exit -- needed to flush printf write buffer

  cudaDeviceReset();

  return 0;
}
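The .hip and .cu files above differ only in how the runtime is invoked; the comment block below summarises the mechanical mapping hipify applies in this pair (added here as an illustration, not part of either file).

// CUDA:  my_first_kernel<<<nblocks, nthreads>>>(d_x);
// HIP :  hipLaunchKernelGGL(my_first_kernel, dim3(nblocks), dim3(nthreads), 0, 0, d_x);
//        //                 kernel           grid           block         shared, stream, args...
// CUDA:  cudaMalloc / cudaMemcpy / cudaMemcpyDeviceToHost / cudaFree / cudaDeviceReset
// HIP :  hipMalloc  / hipMemcpy  / hipMemcpyDeviceToHost  / hipFree  / hipDeviceReset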
5e20db4b5d92b61886ea460d4eec16d6b6796d7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zpotf2.cu, normal z -> c, Thu Oct 8 23:05:35 2020 */ #include "magma_internal.h" #define COMPLEX #define cdotc_max_bs 512 // 512 is max threads for 1.x cards void cpotf2_csscal( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ); void cpotf2_cdotc( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ); #ifdef COMPLEX void magmablas_clacgv( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ); #endif // TODO: this function could be in .cpp file -- it has no CUDA code in it. /***************************************************************************//** Purpose ------- cpotf2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**H * U, if UPLO = MagmaUpper, or A = L * L**H, if UPLO = MagmaLower, where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments --------- @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. - = MagmaUpper: Upper triangular - = MagmaLower: Lower triangular @param[in] n INTEGER The order of the matrix A. N >= 0 and N <= 512. @param[in,out] dA COMPLEX array, dimension (LDDA,N) On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. \n On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**H * U or A = L * L**H. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,N). @param[in] queue magma_queue_t Queue to execute in. @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. 
@ingroup magma_potf2 *******************************************************************************/ extern "C" magma_int_t magma_cpotf2_gpu( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t j; *info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { *info = -1; } else if (n < 0 || n > cdotc_max_bs) { *info = -2; } else if (ldda < max(1,n)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (n == 0) { return *info; } magmaFloatComplex alpha = MAGMA_C_NEG_ONE; magmaFloatComplex beta = MAGMA_C_ONE; if (uplo == MagmaUpper) { for (j = 0; j < n; j++) { cpotf2_cdotc( j, dA(0,j), 1, queue ); // including cdotc product and update a(j,j) if (j < n) { #ifdef COMPLEX magmablas_clacgv( j, dA(0, j), 1, queue ); #endif magma_cgemv( MagmaTrans, j, n-j-1, alpha, dA(0, j+1), ldda, dA(0, j), 1, beta, dA(j, j+1), ldda, queue ); #ifdef COMPLEX magmablas_clacgv( j, dA(0, j), 1, queue ); #endif cpotf2_csscal( n-j, dA(j,j), ldda, queue ); } } } else { for (j = 0; j < n; j++) { cpotf2_cdotc( j, dA(j,0), ldda, queue ); // including cdotc product and update a(j,j) if (j < n) { #ifdef COMPLEX magmablas_clacgv( j, dA(j, 0), ldda, queue ); #endif magma_cgemv( MagmaNoTrans, n-j-1, j, alpha, dA(j+1, 0), ldda, dA(j,0), ldda, beta, dA(j+1, j), 1, queue ); #ifdef COMPLEX magmablas_clacgv( j, dA(j, 0), ldda, queue ); #endif cpotf2_csscal( n-j, dA(j,j), 1, queue ); } } } return *info; } #define csscal_bs 32 #define cdotc_bs 512 #define clacgv_bs 512 // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float shared_data[]; __global__ void kernel_cdotc(int n, magmaFloatComplex *x, int incx, int threadSize) { int tx = threadIdx.x; float *sdata = shared_data; magmaFloatComplex res = MAGMA_C_ZERO; if (tx < n) { res = x[tx*incx]; } sdata[tx] = MAGMA_C_REAL(res * MAGMA_C_CONJ(res)); __syncthreads(); for (int s = blockDim.x/2; s > 32; s >>= 1 ) { if (tx < s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if (tx < 32) { volatile float* smem = sdata; smem[tx] += smem[tx+32]; smem[tx] += smem[tx+16]; smem[tx] += smem[tx+8]; smem[tx] += smem[tx+4]; smem[tx] += smem[tx+2]; smem[tx] += smem[tx+1]; } if (tx == 0) { float xreal = MAGMA_C_REAL(x[n*incx]); x[n*incx] = MAGMA_C_MAKE( sqrt(xreal - sdata[0]), 0 ); } } void cpotf2_cdotc( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ) { /* Specialized Cdotc 1) performs cdotc sum = x[0:n-1]*conj(x[0:n-1]) 2) updates x[n] = sqrt(x[n]-sum); */ if (n > cdotc_max_bs) { fprintf( stderr, "n = %lld > %lld is not supported in cpotf2_cdotc\n", (long long) n, (long long) cdotc_max_bs ); return; } int threadSize; if (n <= 1024 && n > 512) { threadSize = 1024; } else if (n <= 512 && n > 256 ) { threadSize = 512; } else if (n <= 256 && n > 128) { threadSize = 256; } else if (n <= 128 && n > 64) { threadSize = 128; } else { threadSize = 64; } size_t shmem = threadSize * sizeof(float); hipLaunchKernelGGL(( kernel_cdotc) , dim3(1), dim3(threadSize), shmem, queue->cuda_stream() , n, x, incx, threadSize); } __global__ void kernel_csscal(int n, magmaFloatComplex *x, int incx) { int id = blockIdx.x * csscal_bs + threadIdx.x; __shared__ magmaFloatComplex factor; if (threadIdx.x == 0) { factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(x[0]), 0.0); } __syncthreads(); if ( id < n && id > 0) { x[id*incx] = x[id*incx] * 
factor; } } void cpotf2_csscal( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ) { /* Specialized csscal perform x[1:n-1] / x[0] */ dim3 threads(csscal_bs, 1, 1); int num_blocks = magma_ceildiv( n, csscal_bs ); dim3 grid(num_blocks,1); hipLaunchKernelGGL(( kernel_csscal) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, x, incx); } #ifdef COMPLEX __global__ void kernel_clacgv(int n, magmaFloatComplex *x, int incx) { int id = blockIdx.x * clacgv_bs + threadIdx.x; if ( id < n ) { x[id*incx] = MAGMA_C_CONJ(x[id*incx]); } } /***************************************************************************//** Purpose ------- CLACGV conjugates a complex vector of length N. Arguments --------- @param[in] n INTEGER The length of the vector X. N >= 0. @param[in,out] x COMPLEX array, dimension (1+(N-1)*abs(INCX)) On entry, the vector of length N to be conjugated. On exit, X is overwritten with conjg(X). @param[in] incx INTEGER The spacing between successive elements of X. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lacgv *******************************************************************************/ void magmablas_clacgv( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ) { dim3 threads(clacgv_bs, 1, 1); int num_blocks = magma_ceildiv( n, clacgv_bs ); dim3 grid(num_blocks,1); hipLaunchKernelGGL(( kernel_clacgv) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, x, incx); } #endif // COMPLEX
5e20db4b5d92b61886ea460d4eec16d6b6796d7c.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zpotf2.cu, normal z -> c, Thu Oct 8 23:05:35 2020 */ #include "magma_internal.h" #define COMPLEX #define cdotc_max_bs 512 // 512 is max threads for 1.x cards void cpotf2_csscal( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ); void cpotf2_cdotc( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ); #ifdef COMPLEX void magmablas_clacgv( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ); #endif // TODO: this function could be in .cpp file -- it has no CUDA code in it. /***************************************************************************//** Purpose ------- cpotf2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**H * U, if UPLO = MagmaUpper, or A = L * L**H, if UPLO = MagmaLower, where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments --------- @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. - = MagmaUpper: Upper triangular - = MagmaLower: Lower triangular @param[in] n INTEGER The order of the matrix A. N >= 0 and N <= 512. @param[in,out] dA COMPLEX array, dimension (LDDA,N) On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. \n On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**H * U or A = L * L**H. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,N). @param[in] queue magma_queue_t Queue to execute in. @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. 
@ingroup magma_potf2 *******************************************************************************/ extern "C" magma_int_t magma_cpotf2_gpu( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { #define dA(i_, j_) (dA + (i_) + (j_)*ldda) magma_int_t j; *info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { *info = -1; } else if (n < 0 || n > cdotc_max_bs) { *info = -2; } else if (ldda < max(1,n)) { *info = -4; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (n == 0) { return *info; } magmaFloatComplex alpha = MAGMA_C_NEG_ONE; magmaFloatComplex beta = MAGMA_C_ONE; if (uplo == MagmaUpper) { for (j = 0; j < n; j++) { cpotf2_cdotc( j, dA(0,j), 1, queue ); // including cdotc product and update a(j,j) if (j < n) { #ifdef COMPLEX magmablas_clacgv( j, dA(0, j), 1, queue ); #endif magma_cgemv( MagmaTrans, j, n-j-1, alpha, dA(0, j+1), ldda, dA(0, j), 1, beta, dA(j, j+1), ldda, queue ); #ifdef COMPLEX magmablas_clacgv( j, dA(0, j), 1, queue ); #endif cpotf2_csscal( n-j, dA(j,j), ldda, queue ); } } } else { for (j = 0; j < n; j++) { cpotf2_cdotc( j, dA(j,0), ldda, queue ); // including cdotc product and update a(j,j) if (j < n) { #ifdef COMPLEX magmablas_clacgv( j, dA(j, 0), ldda, queue ); #endif magma_cgemv( MagmaNoTrans, n-j-1, j, alpha, dA(j+1, 0), ldda, dA(j,0), ldda, beta, dA(j+1, j), 1, queue ); #ifdef COMPLEX magmablas_clacgv( j, dA(j, 0), ldda, queue ); #endif cpotf2_csscal( n-j, dA(j,j), 1, queue ); } } } return *info; } #define csscal_bs 32 #define cdotc_bs 512 #define clacgv_bs 512 // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float shared_data[]; __global__ void kernel_cdotc(int n, magmaFloatComplex *x, int incx, int threadSize) { int tx = threadIdx.x; float *sdata = shared_data; magmaFloatComplex res = MAGMA_C_ZERO; if (tx < n) { res = x[tx*incx]; } sdata[tx] = MAGMA_C_REAL(res * MAGMA_C_CONJ(res)); __syncthreads(); for (int s = blockDim.x/2; s > 32; s >>= 1 ) { if (tx < s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if (tx < 32) { volatile float* smem = sdata; smem[tx] += smem[tx+32]; smem[tx] += smem[tx+16]; smem[tx] += smem[tx+8]; smem[tx] += smem[tx+4]; smem[tx] += smem[tx+2]; smem[tx] += smem[tx+1]; } if (tx == 0) { float xreal = MAGMA_C_REAL(x[n*incx]); x[n*incx] = MAGMA_C_MAKE( sqrt(xreal - sdata[0]), 0 ); } } void cpotf2_cdotc( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ) { /* Specialized Cdotc 1) performs cdotc sum = x[0:n-1]*conj(x[0:n-1]) 2) updates x[n] = sqrt(x[n]-sum); */ if (n > cdotc_max_bs) { fprintf( stderr, "n = %lld > %lld is not supported in cpotf2_cdotc\n", (long long) n, (long long) cdotc_max_bs ); return; } int threadSize; if (n <= 1024 && n > 512) { threadSize = 1024; } else if (n <= 512 && n > 256 ) { threadSize = 512; } else if (n <= 256 && n > 128) { threadSize = 256; } else if (n <= 128 && n > 64) { threadSize = 128; } else { threadSize = 64; } size_t shmem = threadSize * sizeof(float); kernel_cdotc <<< 1, threadSize, shmem, queue->cuda_stream() >>> (n, x, incx, threadSize); } __global__ void kernel_csscal(int n, magmaFloatComplex *x, int incx) { int id = blockIdx.x * csscal_bs + threadIdx.x; __shared__ magmaFloatComplex factor; if (threadIdx.x == 0) { factor = MAGMA_C_MAKE(1.0/MAGMA_C_REAL(x[0]), 0.0); } __syncthreads(); if ( id < n && id > 0) { x[id*incx] = x[id*incx] * factor; } } void cpotf2_csscal( 
magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ) { /* Specialized csscal perform x[1:n-1] / x[0] */ dim3 threads(csscal_bs, 1, 1); int num_blocks = magma_ceildiv( n, csscal_bs ); dim3 grid(num_blocks,1); kernel_csscal <<< grid, threads, 0, queue->cuda_stream() >>> (n, x, incx); } #ifdef COMPLEX __global__ void kernel_clacgv(int n, magmaFloatComplex *x, int incx) { int id = blockIdx.x * clacgv_bs + threadIdx.x; if ( id < n ) { x[id*incx] = MAGMA_C_CONJ(x[id*incx]); } } /***************************************************************************//** Purpose ------- CLACGV conjugates a complex vector of length N. Arguments --------- @param[in] n INTEGER The length of the vector X. N >= 0. @param[in,out] x COMPLEX array, dimension (1+(N-1)*abs(INCX)) On entry, the vector of length N to be conjugated. On exit, X is overwritten with conjg(X). @param[in] incx INTEGER The spacing between successive elements of X. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lacgv *******************************************************************************/ void magmablas_clacgv( magma_int_t n, magmaFloatComplex *x, magma_int_t incx, magma_queue_t queue ) { dim3 threads(clacgv_bs, 1, 1); int num_blocks = magma_ceildiv( n, clacgv_bs ); dim3 grid(num_blocks,1); kernel_clacgv <<< grid, threads, 0, queue->cuda_stream() >>> (n, x, incx); } #endif // COMPLEX
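A stand-alone sketch of the block-wide reduction used by kernel_cdotc in the MAGMA files above: a shared-memory tree followed by an unrolled, warp-synchronous tail. It is an illustration only (it assumes a single block whose size is a power of two of at least 64, launched with blockDim.x*sizeof(float) dynamic shared memory); on recent architectures a __syncwarp()-based or library reduction would be preferred over the volatile trick.

__global__ void sumSquares(const float* x, float* out, int n)
{
    extern __shared__ float sdata[];            // one float per thread
    int tx = threadIdx.x;
    float v = (tx < n) ? x[tx] : 0.0f;
    sdata[tx] = v * v;                          // local contribution
    __syncthreads();

    for (int s = blockDim.x / 2; s > 32; s >>= 1) {   // shared-memory tree reduction
        if (tx < s) sdata[tx] += sdata[tx + s];
        __syncthreads();
    }
    if (tx < 32) {                              // last warp, no barriers needed on old archs
        volatile float* smem = sdata;
        smem[tx] += smem[tx + 32]; smem[tx] += smem[tx + 16];
        smem[tx] += smem[tx + 8];  smem[tx] += smem[tx + 4];
        smem[tx] += smem[tx + 2];  smem[tx] += smem[tx + 1];
    }
    if (tx == 0) *out = sdata[0];               // sum of squares of x[0..n-1]
}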
99428351775c01c56cea0ba9c2debda870c87a2a.hip
// !!! This is a file automatically generated by hipify!!! /// LMU Muenchen ProjektArbeit Robert Noll 2007 : Similarity Join mittels Grafikprozessor #ifdef _WIN32 #define NOMINMAX #endif // includes #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> #include <hip/hip_runtime.h> #include <sm_11_atomic_functions.h> // note : for threads you need windows.h from the platform sdk : // http://www.microsoft.com/downloads/details.aspx?FamilyId=A55B6B43-E24F-4EA3-A93E-40C0EC4F68E5&displaylang=en #include "threads.h" #undef assert //~ #define assert(code) printf("assert %s : (%s)\n",(int)(code) ? "passed" : "FAILED",#code); if (!(int)(code)) exit(0); #define assert(code) if (!(int)(code)) { printf("assert FAILED : (%s)\n",#code); exit(0); } #define N_FILE_1MB_PER_DIM (1024*1024 / 4) // = 9mb for D=9 floats.. // N_FILE_1MB_PER_DIM = 9mb for 9 floats.. // N_FILE_1MB_PER_DIM*10 = 90mb // N_FILE_1MB_PER_DIM*10*4 = 360mb // input parameters #define D (8) #define N_FILE (N_FILE_1MB_PER_DIM * 1 / D) #define TEST_EPSILON 1.0 //~ #define GPU_NESTED_LOOP // nested loop on gpu, don't use index //~ #define ENABLE_GPU_WRITEOUTLIST //~ #define ENABLE_GPU_WRITEOUTLIST_BIG_RESULT_LIST // activates code for the case that the result list doesn't fit in vram and has to be written out multiple times #define ENABLE_MEASUREMENT_GPU //~ #define ENABLE_MEASUREMENT_CPU_IDX // don't enable both cpu_idx and nest at the same time //~ #define ENABLE_MEASUREMENT_CPU_NEST // don't enable both cpu_idx and nest at the same time #define ENABLE_GPU_IDX3 #define ENABLE_CPU_IDX3 // filepath (todo : read from commandline) //~ #define kDataFilePath "data/coarse-strong-ultra-100-96_10000.txt" // ??? format unknown.. very big lines... 
#define kDataFilePath "data/Corel_ColorMoments_9d.ascii" // 68040 lines : n=65536 //~ #define kDataFilePath "data/Corel_CoocTexture_16d.ascii" // 68040 lines : n=65536 //~ #define kDataFilePath "data/smalldata.ascii" // 16383 lines : n=16384 #define kReportFile "report.txt" // random data generation //~ #define DATA_PURE_RANDOM //~ #define DATA_PURE_RANDOM_MIN (0.0) //~ #define DATA_PURE_RANDOM_MAX (8.0) #define DATA_CLUST_RANDOM #define DATA_CLUST_RANDOM_NUM_CLUST 6 #define DATA_CLUST_RANDOM_RAD_MIN 1.0 #define DATA_CLUST_RANDOM_RAD_MAX 2.0 #define DATA_CLUST_RANDOM_CENTER_MIN 2.0 #define DATA_CLUST_RANDOM_CENTER_MAX 6.0 // constants #define MB (1024 * 1024) // bytes per megabyte #define GRIDHEIGHT 4 #define kThreadBlockSize (64) // x*64:,64,128,192,256 (see cuda-guide page 65) #define kWrapSize (32) // some code in the kernel might depend on this being exactly 32 #define SIZE_RESULTLIST (64 * MB) // 16 mb depends on graka mem, but not needed dynamically #define kMaxResults (SIZE_RESULTLIST/sizeof(ulong2)) // 1 result = 2 uint32 indices #define kLastValidResultIndex (kMaxResults-1) #define kGPU_COUNT 1 #define I0 (16) // index segment-count (max:254) // should be power of two for best performance //#define I0 (64) // index segment-count (max:254) // segment sizes #define I03 (I0*I0*I0) // I0=32 -> 32k*9*4=1mb , I0=16 -> 4k*9*4=144k #define SZ (int(N_FILE + I03-1)/I03) // round upwards #define SY (SZ*I0) #define SX (SZ*I0*I0) #define N (SZ*I0*I0*I0) // rounded upwards, N >= N_FILE , this is the amount of data allocated // DATASIZE for SZ = 254 = 254*64*64*64 / 1024 / 1024 * 4 * 9 = ca 2286mb // DATASIZE for SZ = 32 = 32*64*64*64 / 1024 / 1024 * 4 * 9 = ca 288mb // DATASIZE for SZ = 1 = 1*64*64*64 / 1024 / 1024 * 4 * 9 = ca 9mb // SZ may not be larger than 254, increase I0 if that happens // SZ=254 + I0=254 allows for (254^4) lines of data, which is about 139 gigabyte, that should be enough for a while // increase index type from uchar4 to ushort4 if not.... (DONE, as it's only 254^3...) 
// index level starts #define INDEXSTART_0 (0) #define INDEXSTART_1 ((I0+1)) #define INDEXSTART_2 ((I0+1) + (I0+1)*I0) #define INDEX_END ((I0+1) + (I0+1)*I0 + (I0+1)*I0*I0) #define SIZE_INDEX (INDEX_END * sizeof(float)) // (64 + 64*63 + 64*63*63) * sizeof(float) = ca 1mb : 3 level // index position calc #define INDEXPOS_0(x) (INDEXSTART_0 + (x) ) // x<=I0 #define INDEXPOS_1(x,y) (INDEXSTART_1 + (y) + (x)*(I0+1) ) // x<I0 y<=I0 #define INDEXPOS_2(x,y,z) (INDEXSTART_2 + (z) + (y)*(I0+1) + (x)*(I0+1)*I0 ) // *<I0 z<=I0 // n = iNumLines = number of data-points #define DATASIZE_IN_RAW (N*D*sizeof(float)) #define DATASIZE_IN_STATE ((N/kThreadBlockSize)*sizeof(uint4)) // this is too big, but easier this way #define DATASIZE_IN_INDEX (SIZE_INDEX) #define DATASIZE_IN_TOTAL (DATASIZE_IN_RAW + DATASIZE_IN_STATE + DATASIZE_IN_INDEX) #define DATASIZE_OUT_ATOM (sizeof(unsigned int) * 2) #define DATASIZE_OUT_LIST (SIZE_RESULTLIST) #define DATASIZE_OUT_TOTAL (DATASIZE_OUT_ATOM + DATASIZE_OUT_LIST) #define kStateEndValue 0x2fffFFFF // forward declarations int ReadTextData (const char* szFilePath,float* pData); #include "utils.cu" #include "idx_kernel.cu" #include "idx_prepare.cu" #include "idx_cpu.cu" #include "nestedloop.cu" #include <time.h> float* gpDataIn_Raw = 0; float* gpDataIn_Index = 0; // ##### ##### ##### ##### ##### dataparts for threads #define kMaxPartCount I0 #define kMaxPartDataSize (9 * 4 * 11 * MB) // = 396mb = ok on both devices typedef struct { int iPartID; int iPartElementsFirst; int iPartElementsLast; int iPartViewFirst; int iPartViewLast; int iViewDataOffsetInFloats; ///< global offset int iViewDataOffsetInPoints; ///< global offset int iViewDataSize; ///< in bytes int iElementOffsetInPoints; ///< offset relative to part start int iElementCount; ///< in datapoints int iResultCount; int iNumThreadBlocks; int iDataSizeBounds; float fEpsilon; unsigned int* pDataOut_Atom; ulong2* pDataOut_List; float3* pDataIn_Bounds; uint4* pDataInDebug_State; } cDataPart; cDataPart gDataParts[kMaxPartCount]; bool cDataPart_Prepare (cDataPart* p,int iPartID,int iPartCount,float e) { p->iPartID = iPartID; p->fEpsilon = e; p->iResultCount = 0; p->pDataOut_Atom = 0; p->pDataOut_List = 0; p->pDataIn_Bounds = 0; p->pDataInDebug_State = 0; // determine the datarange that will be managed by this part/thread int iMaxPartLen = (I0 + iPartCount - 1)/iPartCount; // round up p->iPartElementsFirst = iPartID*iMaxPartLen; // each element gets its own thread later p->iPartElementsLast = min(p->iPartElementsFirst+iMaxPartLen-1,I0-1); //~ // determine the view = the area in the data that has to be scanned, e.g. 
elements + epsilon border p->iPartViewFirst = 0; // the view is the data range that has to be examined p->iPartViewLast = I0-1; // inclusive #define GET_MIN_0(x) gpDataIn_Index[INDEXPOS_0(x)] #define GET_MAX_0(x) GET_MIN_0((x+1)) float fMin = GET_MIN_0(p->iPartElementsFirst ) - p->fEpsilon; float fMax = GET_MAX_0(p->iPartElementsLast ) + p->fEpsilon; // max(i)=min(i+1) while (p->iPartViewFirst < I0-1 && GET_MAX_0(p->iPartViewFirst) < fMin) ++p->iPartViewFirst; while (p->iPartViewLast > 0 && GET_MIN_0(p->iPartViewLast) > fMax) --p->iPartViewLast; assert(p->iPartViewFirst <= p->iPartElementsFirst); assert(p->iPartViewLast >= p->iPartElementsLast); // calc mem usage and positions p->iViewDataSize = (p->iPartViewLast+1 - p->iPartViewFirst ) * SX * D * sizeof(float); // in bytes p->iElementCount = (p->iPartElementsLast+1 - p->iPartElementsFirst ) * SX; // in datapoints p->iViewDataOffsetInPoints = p->iPartViewFirst * SX; p->iViewDataOffsetInFloats = p->iPartViewFirst * SX * D; p->iElementOffsetInPoints = (p->iPartElementsFirst - p->iPartViewFirst) * SX; p->iNumThreadBlocks = p->iElementCount / kThreadBlockSize; p->iDataSizeBounds = p->iNumThreadBlocks*2*sizeof(float3); // print a little debug info printf("parts=%d [%d]: elem[%d,%d] view[%d,%d] datasize=%dMB\n",iPartCount,iPartID, p->iPartElementsFirst,p->iPartElementsLast, p->iPartViewFirst,p->iPartViewLast,p->iViewDataSize/MB); //~ // check if this part fits in vram if (p->iViewDataSize > kMaxPartDataSize) return false; return true; } /// malloc might not be threadsafe void cDataPart_Alloc (cDataPart* p) { p->pDataIn_Bounds = (float3*) malloc(p->iDataSizeBounds); p->pDataOut_Atom = (unsigned int*) malloc(DATASIZE_OUT_ATOM); #ifdef ENABLE_GPU_WRITEOUTLIST p->pDataOut_List = (ulong2*) malloc(DATASIZE_OUT_LIST); #endif p->pDataInDebug_State = (uint4*) malloc(DATASIZE_IN_STATE); } void cDataPart_Free (cDataPart* p) { #define FREE_AND_ZERO(x) if (x) free(x); x = 0; FREE_AND_ZERO(p->pDataIn_Bounds); FREE_AND_ZERO(p->pDataOut_Atom); FREE_AND_ZERO(p->pDataOut_List); FREE_AND_ZERO(p->pDataInDebug_State); } //////////////////////////////////////////////////////////////////////////////// // GPU thread //////////////////////////////////////////////////////////////////////////////// static CUT_THREADPROC gpuThread (int* piPartID) { int iPartID = *piPartID; cDataPart* p = &gDataParts[iPartID]; int iGPUId = iPartID % kGPU_COUNT; CUDA_SAFE_CALL(hipSetDevice(iGPUId)); printf("run:%d,%d (...,e=%f,%d,%d) off=%d,size=%d,el=%d\n", iPartID,iGPUId, p->fEpsilon,p->iElementOffsetInPoints,p->iViewDataOffsetInPoints, p->iViewDataOffsetInFloats,p->iViewDataSize,p->iElementCount); #define HANDLE_ERROR(x) myLastErr = hipGetLastError(); if (myLastErr != 0) printf("%s : %d(%s)\n",x,(int)myLastErr,(myLastErr != 0) ? hipGetErrorString(myLastErr) : "ok"); hipError_t myLastErr; uint4* pDataInDebug_State = p->pDataInDebug_State; // calculate bounds of individual threadblocks // this part is only done once at startup, so performance doesn't really matter much if (1) { float* pElements = &gpDataIn_Raw[p->iViewDataOffsetInFloats + p->iElementOffsetInPoints*D]; float e = p->fEpsilon; float3 vMin,vMax,vCur; // foreach threadblock... 
for (int iBlockIdx=0;iBlockIdx < p->iNumThreadBlocks;++iBlockIdx) { // calculate the bounds for the wrap, for the first 3 dimensions { for (int d=0;d<kThreadBlockSize;++d) { // compiler should loop-unroll vCur.x = pElements[(iBlockIdx * kThreadBlockSize + d)*D + 0]; vCur.y = pElements[(iBlockIdx * kThreadBlockSize + d)*D + 1]; vCur.z = pElements[(iBlockIdx * kThreadBlockSize + d)*D + 2]; if (d > 0) { vMin.x = min(vMin.x,vCur.x); vMin.y = min(vMin.y,vCur.y); vMin.z = min(vMin.z,vCur.z); vMax.x = max(vMax.x,vCur.x); vMax.y = max(vMax.y,vCur.y); vMax.z = max(vMax.z,vCur.z); } else { vMin = vCur; vMax = vCur; } }} // add epsilon to the edges of the bounds vMin.x = vMin.x - e; vMin.y = vMin.y - e; vMin.z = vMin.z - e; vMax.x = vMax.x + e; vMax.y = vMax.y + e; vMax.z = vMax.z + e; p->pDataIn_Bounds[iBlockIdx*2+0] = vMin; p->pDataIn_Bounds[iBlockIdx*2+1] = vMax; } } // alloc and init device memory (vram) float* pDataInD_Raw = NULL; float* pDataInD_Index = NULL; uint4* pDataInD_State = NULL; float3* pDataInD_Bounds = NULL; unsigned int* pDataOutD_Atom = NULL; ulong2* pDataOutD_List = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &pDataOutD_Atom, DATASIZE_OUT_ATOM )); assert(pDataOutD_Atom); #ifdef ENABLE_GPU_WRITEOUTLIST CUDA_SAFE_CALL( hipMalloc( (void**) &pDataOutD_List, DATASIZE_OUT_LIST )); assert(pDataOutD_List); #endif CUDA_SAFE_CALL( hipMalloc( (void**) &pDataInD_Index, DATASIZE_IN_INDEX )); assert(pDataInD_Index); CUDA_SAFE_CALL( hipMalloc( (void**) &pDataInD_State, DATASIZE_IN_STATE )); assert(pDataInD_State); CUDA_SAFE_CALL( hipMalloc( (void**) &pDataInD_Bounds, p->iDataSizeBounds )); assert(pDataInD_Bounds); CUDA_SAFE_CALL( hipMalloc( (void**) &pDataInD_Raw, p->iViewDataSize )); assert(pDataInD_Raw); // biggest last CUDA_SAFE_CALL( hipMemcpy(pDataInD_Raw, &gpDataIn_Raw[p->iViewDataOffsetInFloats], p->iViewDataSize, hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy(pDataInD_Index, gpDataIn_Index, DATASIZE_IN_INDEX, hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemcpy(pDataInD_Bounds, p->pDataIn_Bounds, p->iDataSizeBounds, hipMemcpyHostToDevice )); CUDA_SAFE_CALL( hipMemset(pDataInD_State, 0, DATASIZE_IN_STATE )); CUDA_SAFE_CALL( hipMemset(pDataOutD_Atom, 0, DATASIZE_OUT_ATOM )); HANDLE_ERROR("memset") // prepare device environment dim3 block_size; dim3 grid_size; grid_size.x = p->iNumThreadBlocks / GRIDHEIGHT; grid_size.y = GRIDHEIGHT; grid_size.z = 1; block_size.x = kThreadBlockSize; block_size.y = 1; block_size.z = 1; unsigned int mem_shared = 0; // this is for dynamic alloc of shared mem, we alloc statically unsigned int iMyAtoms[2]; assert(DATASIZE_OUT_ATOM == sizeof(iMyAtoms)); iMyAtoms[0] = 0; iMyAtoms[1] = 0; #define kMaxGridSize (63*1024) assert(grid_size.x <= kMaxGridSize && "grid_size.x larger than supported (hipGetDeviceProperties:maxGridSize: 63k currently)"); printf("run_:%d,%d\n",iPartID,iGPUId); bool bPrintFirst = true; // precalc params as much as possible, i think they are only needed once for the whole program, not for every thread do { // run kernel hipLaunchKernelGGL(( mykernel), dim3(grid_size), dim3(block_size), mem_shared , 0, pDataInD_Raw, pDataInD_Index, pDataInD_State, pDataInD_Bounds, pDataOutD_Atom, pDataOutD_List, p->fEpsilon, p->fEpsilon*p->fEpsilon, p->iElementOffsetInPoints, p->iViewDataOffsetInPoints); //~ CUT_SAFE_CALL( hipDeviceSynchronize()); // kernel start is async, so wait for finish here //~ HANDLE_ERROR("threadsync") // copy result from device to host //~ CUDA_SAFE_CALL( hipMemcpy(pDataInDebug_State, pDataInD_State, DATASIZE_IN_STATE, 
hipMemcpyDeviceToHost )); HANDLE_ERROR("hipMemcpy state") CUDA_SAFE_CALL( hipMemcpy(iMyAtoms, pDataOutD_Atom, DATASIZE_OUT_ATOM, hipMemcpyDeviceToHost )); HANDLE_ERROR("hipMemcpy atom") // get counter CUDA_SAFE_CALL( hipMemset(pDataOutD_Atom, 0, DATASIZE_OUT_ATOM )); HANDLE_ERROR("hipMemset atom") // clear counter int iNumResults = min(kMaxResults,iMyAtoms[0]); //~ int iNumResults = min(kMaxResults,p->pDataOut_Atom[0]); //~ printf("datapart %d : atom[0]=%d atom[1]=%d iNumResults=%d kMaxResults=%d\n",p->iPartID,p->pDataOut_Atom[0],p->pDataOut_Atom[1],iNumResults,(int)kMaxResults); //~ printf("datapart %d : atom[0]=%d atom[1]=%d iNumResults=%d kMaxResults=%d\n",p->iPartID,iMyAtoms[0],iMyAtoms[1],iNumResults,(int)kMaxResults); #ifdef ENABLE_GPU_WRITEOUTLIST int iListSize = iNumResults * sizeof(ulong2); CUDA_SAFE_CALL( hipMemcpy(p->pDataOut_List, pDataOutD_List, iListSize, hipMemcpyDeviceToHost )); // get list #endif //~ printf("datapart %d : receive results from device\n",p->iPartID); p->iResultCount += iNumResults; // todo : do something with the data here, e.g. printf, or write to file ? //~ int b = grid_size.x*grid_size.y; //~ for (int i=0;i<grid_size.x*grid_size.y+4;++i) if (pDataInDebug_State[i].x == kStateEndValue) --b; //~ printf("[%d,%d,%d,%d]:%d\n",iPartID,a,iNumResults,(int)iMyAtoms[1],b); if (iMyAtoms[1] == 0 && iNumResults == 0) break; // detect end //~ if (--a == 0) break; if (bPrintFirst) { bPrintFirst = false; printf("run_firstdone:%d,%d\n",iPartID,iGPUId); } break; } while (1); printf("datapart %d : atom[0]=%d atom[1]=%d iNumResults=%d kMaxResults=%d\n",p->iPartID,iMyAtoms[0],iMyAtoms[1],p->iResultCount,(int)kMaxResults); // release device memory CUDA_SAFE_CALL(hipFree(pDataInD_Raw)); CUDA_SAFE_CALL(hipFree(pDataInD_Index)); CUDA_SAFE_CALL(hipFree(pDataInD_State)); CUDA_SAFE_CALL(hipFree(pDataOutD_Atom)); //~ CUDA_SAFE_CALL(hipFree(pDataOutD_List)); CUT_THREADEND; } // ##### ##### ##### ##### ##### main int main( int argc, char** argv) { #ifdef ENABLE_MEASUREMENT_CPU_IDX #ifdef ENABLE_MEASUREMENT_CPU_NEST printf("don't enable ENABLE_MEASUREMENT_CPU_IDX and _NEST at the same time !\n"); exit(0); #endif #endif CUT_DEVICE_INIT(); const char* sCPU_CalcType = "none"; const char* sGPU_CalcType = "none"; int count = 0; CUDA_SAFE_CALL( hipGetDeviceCount(&count)); assert(count >= kGPU_COUNT); float fEpsilon = TEST_EPSILON; // todo : read from arg ? 
float fSqEpsilon = fEpsilon*fEpsilon; float fTimeGPU = 1; float fTimeCPU = 1; int iResultsGPU = 0; int iResultsCPU = 0; // print device infos if (0) PrintDeviceInfos(); // size calc and alloc gpDataIn_Raw = (float*) malloc(DATASIZE_IN_RAW); gpDataIn_Index = (float*) malloc(DATASIZE_IN_INDEX); // print some infos printf("N=%d\n",N); printf("SX=%d\n",SX); printf("SY=%d\n",SY); printf("SZ=%d\n",SZ); printf("I0=%d\n",I0); printf("DATASIZE_IN_RAW=%dkb\n", DATASIZE_IN_RAW /1024); printf("DATASIZE_IN_STATE=%dkb\n", DATASIZE_IN_STATE /1024); printf("DATASIZE_IN_INDEX=%dkb\n", DATASIZE_IN_INDEX /1024); printf("DATASIZE_IN_TOTAL=%dkb\n", DATASIZE_IN_TOTAL /1024); printf("DATASIZE_OUT_TOTAL=%dkb\n", DATASIZE_OUT_TOTAL /1024); // read file RobStartTimer(); ReadTextData(kDataFilePath,gpDataIn_Raw); // read raw data from file printf("%4.2f sec : reading data from file\n",RobStopTimer()); // generate index RobStartTimer(); IndexStructure_Generate(gpDataIn_Raw,gpDataIn_Index); // gen index and sort raw data printf("%4.2f sec : generating index data\n",RobStopTimer()); #ifdef ENABLE_MEASUREMENT_GPU if (1) { int threadIds[kGPU_COUNT]; int iPartID,iGPUIndex; int iPartCount = 0; CUTThread* threads = (CUTThread *)malloc(sizeof(CUTThread) * kGPU_COUNT); RobStartTimer();printf("starting gpu...\n"); #ifdef GPU_NESTED_LOOP sGPU_CalcType = "nest"; #else sGPU_CalcType = "idx"; #endif // determine how the data will be subdivided bool bOK = false; for (iPartCount=kGPU_COUNT;iPartCount <= kMaxPartCount;iPartCount*=2) { bOK = true; for (iPartID=0;iPartID<iPartCount;++iPartID) if (!cDataPart_Prepare(&gDataParts[iPartID],iPartID,iPartCount,fEpsilon)) { bOK = false; break; } if (bOK) break; } if (!bOK) { printf("failed to find a partitioning that fits in vram, try lowering epsilon, or increase I0\n"); exit(1); } for (iPartID=0;iPartID<iPartCount;++iPartID) cDataPart_Alloc(&gDataParts[iPartID]); //~ for (int j=0;j<I0/2;++j) printf("%d:%+0.1f %d:%+0.1f\n",j,GET_MIN_0(j),j+I0/2,GET_MIN_0(j+I0/2)); #ifdef GPU_NESTED_LOOP assert(iPartCount == 1 && "nested loop on gpu must currently be on a single GPU"); #endif // start threads for (iPartID=0;iPartID<iPartCount;++iPartID) { iGPUIndex = iPartID % kGPU_COUNT; threadIds[iGPUIndex] = iPartID; gpuThread(&threadIds[iGPUIndex]); //~ threads[iGPUIndex] = cutStartThread((CUT_THREADROUTINE)gpuThread, (void *)&threadIds[iGPUIndex]); //~ if (iGPUIndex == kGPU_COUNT-1) cutWaitForThreads(threads, kGPU_COUNT); // Wait for all the threads to finish. 
} iResultsGPU = 0; for (iPartID=0;iPartID<iPartCount;++iPartID) iResultsGPU += gDataParts[iPartID].iResultCount; for (iPartID=0;iPartID<iPartCount;++iPartID) cDataPart_Free(&gDataParts[iPartID]); free(threads); printf("all threads finished\n"); fTimeGPU = RobStopTimer(); printf("%4.2f sec : gpu\n",fTimeGPU); } #endif #ifdef ENABLE_MEASUREMENT_CPU_NEST if (1) { // NestedLoop RobStartTimer(); printf("check : NestedLoop...\n"); iResultsCPU = NestedLoop(gpDataIn_Raw,fSqEpsilon); fTimeCPU = RobStopTimer(); printf("%4.2f sec : check : NestedLoop\n",fTimeCPU); sCPU_CalcType = "nest"; } #endif #ifdef ENABLE_MEASUREMENT_CPU_IDX if (1) { // Idx_CPU RobStartTimer(); printf("check : with index on cpu...\n"); iResultsCPU = Idx_CPU(gpDataIn_Raw,gpDataIn_Index,fEpsilon,fSqEpsilon); fTimeCPU = RobStopTimer(); printf("%4.2f sec : check : with index on cpu\n",fTimeCPU); sCPU_CalcType = "idx"; } #endif // time-text char myTimeText[256] = ""; time_t mytime; time(&mytime); strftime(myTimeText,255,"%Y.%m.%d_%H.%M.%S",localtime(&mytime)); if (1) { // write report FILE* fp = fopen(kReportFile,"a"); if (fp) { int iGPU_IDX3 = 0; int iCPU_IDX3 = 0; #ifdef ENABLE_GPU_IDX3 iGPU_IDX3 = 1; #endif #ifdef ENABLE_CPU_IDX3 iCPU_IDX3 = 1; #endif #ifdef DATA_PURE_RANDOM const char* szDataType = "unirand"; #else #ifdef DATA_CLUST_RANDOM const char* szDataType = "clustrand"; #else const char* szDataType = "file"; #endif #endif float fTolerance = 0.01; // 1 % tolerance due to less exact float fRelError = float(abs(iResultsCPU - iResultsGPU)) / float( (iResultsCPU > 0) ? iResultsCPU : 1 ); bool bOK = fRelError < fTolerance; // error relative to "correct" cpu results... float fErrorPercent = ceil(fRelError * 1000.0) * 0.1; #define REPORT "%s gpu/cpu=%8.1f %s err<=%0.1f%% N=%d size=%dMB %s I0=%d i3:%d,%d e=%0.1f d=%d tgpu(%s)=%0.1fs=%0.1fm=%0.1fh tcpu(%s)=%0.1f res=%d/%d\n", \ myTimeText,fTimeGPU/fTimeCPU,bOK?" ok ":"MISS", fErrorPercent, \ N,DATASIZE_IN_RAW/MB,szDataType,I0,iGPU_IDX3,iCPU_IDX3, \ (float)TEST_EPSILON,(int)D,sGPU_CalcType,fTimeGPU,fTimeGPU/60,fTimeGPU/3600,sCPU_CalcType,fTimeCPU, iResultsCPU,iResultsGPU printf( REPORT ); fprintf( fp, REPORT ); fclose(fp); } } if (0) { // write sample data char mySampleDataPath[256] = ""; sprintf(mySampleDataPath,"sampledata/sampledata_%s_%dMB.txt",myTimeText,(int)DATASIZE_IN_RAW/MB); FILE* fp = fopen(mySampleDataPath,"a"); if (fp) { fprintf(fp,"START e=%f rcpu=%d tcpu=%f\n",fEpsilon,iResultsCPU,fTimeCPU); for (int i=0;i<N;++i) { for (int d=0;d<D;++d) fprintf(fp,"%f,",gpDataIn_Raw[i*D + d]); fprintf(fp,"\n"); } fclose(fp); } } // release memory free(gpDataIn_Raw); free(gpDataIn_Index); CUT_EXIT(argc, argv); return 0; }
99428351775c01c56cea0ba9c2debda870c87a2a.cu
/// LMU Muenchen ProjektArbeit Robert Noll 2007 : Similarity Join mittels Grafikprozessor #ifdef _WIN32 #define NOMINMAX #endif // includes #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> #include <cuda_runtime.h> #include <sm_11_atomic_functions.h> // note : for threads you need windows.h from the platform sdk : // http://www.microsoft.com/downloads/details.aspx?FamilyId=A55B6B43-E24F-4EA3-A93E-40C0EC4F68E5&displaylang=en #include "threads.h" #undef assert //~ #define assert(code) printf("assert %s : (%s)\n",(int)(code) ? "passed" : "FAILED",#code); if (!(int)(code)) exit(0); #define assert(code) if (!(int)(code)) { printf("assert FAILED : (%s)\n",#code); exit(0); } #define N_FILE_1MB_PER_DIM (1024*1024 / 4) // = 9mb for D=9 floats.. // N_FILE_1MB_PER_DIM = 9mb for 9 floats.. // N_FILE_1MB_PER_DIM*10 = 90mb // N_FILE_1MB_PER_DIM*10*4 = 360mb // input parameters #define D (8) #define N_FILE (N_FILE_1MB_PER_DIM * 1 / D) #define TEST_EPSILON 1.0 //~ #define GPU_NESTED_LOOP // nested loop on gpu, don't use index //~ #define ENABLE_GPU_WRITEOUTLIST //~ #define ENABLE_GPU_WRITEOUTLIST_BIG_RESULT_LIST // activates code for the case that the result list doesn't fit in vram and has to be written out multiple times #define ENABLE_MEASUREMENT_GPU //~ #define ENABLE_MEASUREMENT_CPU_IDX // don't enable both cpu_idx and nest at the same time //~ #define ENABLE_MEASUREMENT_CPU_NEST // don't enable both cpu_idx and nest at the same time #define ENABLE_GPU_IDX3 #define ENABLE_CPU_IDX3 // filepath (todo : read from commandline) //~ #define kDataFilePath "data/coarse-strong-ultra-100-96_10000.txt" // ??? format unknown.. very big lines... #define kDataFilePath "data/Corel_ColorMoments_9d.ascii" // 68040 lines : n=65536 //~ #define kDataFilePath "data/Corel_CoocTexture_16d.ascii" // 68040 lines : n=65536 //~ #define kDataFilePath "data/smalldata.ascii" // 16383 lines : n=16384 #define kReportFile "report.txt" // random data generation //~ #define DATA_PURE_RANDOM //~ #define DATA_PURE_RANDOM_MIN (0.0) //~ #define DATA_PURE_RANDOM_MAX (8.0) #define DATA_CLUST_RANDOM #define DATA_CLUST_RANDOM_NUM_CLUST 6 #define DATA_CLUST_RANDOM_RAD_MIN 1.0 #define DATA_CLUST_RANDOM_RAD_MAX 2.0 #define DATA_CLUST_RANDOM_CENTER_MIN 2.0 #define DATA_CLUST_RANDOM_CENTER_MAX 6.0 // constants #define MB (1024 * 1024) // bytes per megabyte #define GRIDHEIGHT 4 #define kThreadBlockSize (64) // x*64:,64,128,192,256 (see cuda-guide page 65) #define kWrapSize (32) // some code in the kernel might depend on this being exactly 32 #define SIZE_RESULTLIST (64 * MB) // 16 mb depends on graka mem, but not needed dynamically #define kMaxResults (SIZE_RESULTLIST/sizeof(ulong2)) // 1 result = 2 uint32 indices #define kLastValidResultIndex (kMaxResults-1) #define kGPU_COUNT 1 #define I0 (16) // index segment-count (max:254) // should be power of two for best performance //#define I0 (64) // index segment-count (max:254) // segment sizes #define I03 (I0*I0*I0) // I0=32 -> 32k*9*4=1mb , I0=16 -> 4k*9*4=144k #define SZ (int(N_FILE + I03-1)/I03) // round upwards #define SY (SZ*I0) #define SX (SZ*I0*I0) #define N (SZ*I0*I0*I0) // rounded upwards, N >= N_FILE , this is the amount of data allocated // DATASIZE for SZ = 254 = 254*64*64*64 / 1024 / 1024 * 4 * 9 = ca 2286mb // DATASIZE for SZ = 32 = 32*64*64*64 / 1024 / 1024 * 4 * 9 = ca 288mb // DATASIZE for SZ = 1 = 1*64*64*64 / 1024 / 1024 * 4 * 9 = ca 9mb // SZ may not be larger than 254, increase I0 if that happens // SZ=254 + I0=254 
allows for (254^4) lines of data, which is about 139 gigabyte, that should be enough for a while // increase index type from uchar4 to ushort4 if not.... (DONE, as it's only 254^3...) // index level starts #define INDEXSTART_0 (0) #define INDEXSTART_1 ((I0+1)) #define INDEXSTART_2 ((I0+1) + (I0+1)*I0) #define INDEX_END ((I0+1) + (I0+1)*I0 + (I0+1)*I0*I0) #define SIZE_INDEX (INDEX_END * sizeof(float)) // (64 + 64*63 + 64*63*63) * sizeof(float) = ca 1mb : 3 level // index position calc #define INDEXPOS_0(x) (INDEXSTART_0 + (x) ) // x<=I0 #define INDEXPOS_1(x,y) (INDEXSTART_1 + (y) + (x)*(I0+1) ) // x<I0 y<=I0 #define INDEXPOS_2(x,y,z) (INDEXSTART_2 + (z) + (y)*(I0+1) + (x)*(I0+1)*I0 ) // *<I0 z<=I0 // n = iNumLines = number of data-points #define DATASIZE_IN_RAW (N*D*sizeof(float)) #define DATASIZE_IN_STATE ((N/kThreadBlockSize)*sizeof(uint4)) // this is too big, but easier this way #define DATASIZE_IN_INDEX (SIZE_INDEX) #define DATASIZE_IN_TOTAL (DATASIZE_IN_RAW + DATASIZE_IN_STATE + DATASIZE_IN_INDEX) #define DATASIZE_OUT_ATOM (sizeof(unsigned int) * 2) #define DATASIZE_OUT_LIST (SIZE_RESULTLIST) #define DATASIZE_OUT_TOTAL (DATASIZE_OUT_ATOM + DATASIZE_OUT_LIST) #define kStateEndValue 0x2fffFFFF // forward declarations int ReadTextData (const char* szFilePath,float* pData); #include "utils.cu" #include "idx_kernel.cu" #include "idx_prepare.cu" #include "idx_cpu.cu" #include "nestedloop.cu" #include <time.h> float* gpDataIn_Raw = 0; float* gpDataIn_Index = 0; // ##### ##### ##### ##### ##### dataparts for threads #define kMaxPartCount I0 #define kMaxPartDataSize (9 * 4 * 11 * MB) // = 396mb = ok on both devices typedef struct { int iPartID; int iPartElementsFirst; int iPartElementsLast; int iPartViewFirst; int iPartViewLast; int iViewDataOffsetInFloats; ///< global offset int iViewDataOffsetInPoints; ///< global offset int iViewDataSize; ///< in bytes int iElementOffsetInPoints; ///< offset relative to part start int iElementCount; ///< in datapoints int iResultCount; int iNumThreadBlocks; int iDataSizeBounds; float fEpsilon; unsigned int* pDataOut_Atom; ulong2* pDataOut_List; float3* pDataIn_Bounds; uint4* pDataInDebug_State; } cDataPart; cDataPart gDataParts[kMaxPartCount]; bool cDataPart_Prepare (cDataPart* p,int iPartID,int iPartCount,float e) { p->iPartID = iPartID; p->fEpsilon = e; p->iResultCount = 0; p->pDataOut_Atom = 0; p->pDataOut_List = 0; p->pDataIn_Bounds = 0; p->pDataInDebug_State = 0; // determine the datarange that will be managed by this part/thread int iMaxPartLen = (I0 + iPartCount - 1)/iPartCount; // round up p->iPartElementsFirst = iPartID*iMaxPartLen; // each element gets its own thread later p->iPartElementsLast = min(p->iPartElementsFirst+iMaxPartLen-1,I0-1); //~ // determine the view = the area in the data that has to be scanned, e.g. 
elements + epsilon border p->iPartViewFirst = 0; // the view is the data range that has to be examined p->iPartViewLast = I0-1; // inclusive #define GET_MIN_0(x) gpDataIn_Index[INDEXPOS_0(x)] #define GET_MAX_0(x) GET_MIN_0((x+1)) float fMin = GET_MIN_0(p->iPartElementsFirst ) - p->fEpsilon; float fMax = GET_MAX_0(p->iPartElementsLast ) + p->fEpsilon; // max(i)=min(i+1) while (p->iPartViewFirst < I0-1 && GET_MAX_0(p->iPartViewFirst) < fMin) ++p->iPartViewFirst; while (p->iPartViewLast > 0 && GET_MIN_0(p->iPartViewLast) > fMax) --p->iPartViewLast; assert(p->iPartViewFirst <= p->iPartElementsFirst); assert(p->iPartViewLast >= p->iPartElementsLast); // calc mem usage and positions p->iViewDataSize = (p->iPartViewLast+1 - p->iPartViewFirst ) * SX * D * sizeof(float); // in bytes p->iElementCount = (p->iPartElementsLast+1 - p->iPartElementsFirst ) * SX; // in datapoints p->iViewDataOffsetInPoints = p->iPartViewFirst * SX; p->iViewDataOffsetInFloats = p->iPartViewFirst * SX * D; p->iElementOffsetInPoints = (p->iPartElementsFirst - p->iPartViewFirst) * SX; p->iNumThreadBlocks = p->iElementCount / kThreadBlockSize; p->iDataSizeBounds = p->iNumThreadBlocks*2*sizeof(float3); // print a little debug info printf("parts=%d [%d]: elem[%d,%d] view[%d,%d] datasize=%dMB\n",iPartCount,iPartID, p->iPartElementsFirst,p->iPartElementsLast, p->iPartViewFirst,p->iPartViewLast,p->iViewDataSize/MB); //~ // check if this part fits in vram if (p->iViewDataSize > kMaxPartDataSize) return false; return true; } /// malloc might not be threadsafe void cDataPart_Alloc (cDataPart* p) { p->pDataIn_Bounds = (float3*) malloc(p->iDataSizeBounds); p->pDataOut_Atom = (unsigned int*) malloc(DATASIZE_OUT_ATOM); #ifdef ENABLE_GPU_WRITEOUTLIST p->pDataOut_List = (ulong2*) malloc(DATASIZE_OUT_LIST); #endif p->pDataInDebug_State = (uint4*) malloc(DATASIZE_IN_STATE); } void cDataPart_Free (cDataPart* p) { #define FREE_AND_ZERO(x) if (x) free(x); x = 0; FREE_AND_ZERO(p->pDataIn_Bounds); FREE_AND_ZERO(p->pDataOut_Atom); FREE_AND_ZERO(p->pDataOut_List); FREE_AND_ZERO(p->pDataInDebug_State); } //////////////////////////////////////////////////////////////////////////////// // GPU thread //////////////////////////////////////////////////////////////////////////////// static CUT_THREADPROC gpuThread (int* piPartID) { int iPartID = *piPartID; cDataPart* p = &gDataParts[iPartID]; int iGPUId = iPartID % kGPU_COUNT; CUDA_SAFE_CALL(cudaSetDevice(iGPUId)); printf("run:%d,%d (...,e=%f,%d,%d) off=%d,size=%d,el=%d\n", iPartID,iGPUId, p->fEpsilon,p->iElementOffsetInPoints,p->iViewDataOffsetInPoints, p->iViewDataOffsetInFloats,p->iViewDataSize,p->iElementCount); #define HANDLE_ERROR(x) myLastErr = cudaGetLastError(); if (myLastErr != 0) printf("%s : %d(%s)\n",x,(int)myLastErr,(myLastErr != 0) ? cudaGetErrorString(myLastErr) : "ok"); cudaError_t myLastErr; uint4* pDataInDebug_State = p->pDataInDebug_State; // calculate bounds of individual threadblocks // this part is only done once at startup, so performance doesn't really matter much if (1) { float* pElements = &gpDataIn_Raw[p->iViewDataOffsetInFloats + p->iElementOffsetInPoints*D]; float e = p->fEpsilon; float3 vMin,vMax,vCur; // foreach threadblock... 
for (int iBlockIdx=0;iBlockIdx < p->iNumThreadBlocks;++iBlockIdx) { // calculate the bounds for the wrap, for the first 3 dimensions { for (int d=0;d<kThreadBlockSize;++d) { // compiler should loop-unroll vCur.x = pElements[(iBlockIdx * kThreadBlockSize + d)*D + 0]; vCur.y = pElements[(iBlockIdx * kThreadBlockSize + d)*D + 1]; vCur.z = pElements[(iBlockIdx * kThreadBlockSize + d)*D + 2]; if (d > 0) { vMin.x = min(vMin.x,vCur.x); vMin.y = min(vMin.y,vCur.y); vMin.z = min(vMin.z,vCur.z); vMax.x = max(vMax.x,vCur.x); vMax.y = max(vMax.y,vCur.y); vMax.z = max(vMax.z,vCur.z); } else { vMin = vCur; vMax = vCur; } }} // add epsilon to the edges of the bounds vMin.x = vMin.x - e; vMin.y = vMin.y - e; vMin.z = vMin.z - e; vMax.x = vMax.x + e; vMax.y = vMax.y + e; vMax.z = vMax.z + e; p->pDataIn_Bounds[iBlockIdx*2+0] = vMin; p->pDataIn_Bounds[iBlockIdx*2+1] = vMax; } } // alloc and init device memory (vram) float* pDataInD_Raw = NULL; float* pDataInD_Index = NULL; uint4* pDataInD_State = NULL; float3* pDataInD_Bounds = NULL; unsigned int* pDataOutD_Atom = NULL; ulong2* pDataOutD_List = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &pDataOutD_Atom, DATASIZE_OUT_ATOM )); assert(pDataOutD_Atom); #ifdef ENABLE_GPU_WRITEOUTLIST CUDA_SAFE_CALL( cudaMalloc( (void**) &pDataOutD_List, DATASIZE_OUT_LIST )); assert(pDataOutD_List); #endif CUDA_SAFE_CALL( cudaMalloc( (void**) &pDataInD_Index, DATASIZE_IN_INDEX )); assert(pDataInD_Index); CUDA_SAFE_CALL( cudaMalloc( (void**) &pDataInD_State, DATASIZE_IN_STATE )); assert(pDataInD_State); CUDA_SAFE_CALL( cudaMalloc( (void**) &pDataInD_Bounds, p->iDataSizeBounds )); assert(pDataInD_Bounds); CUDA_SAFE_CALL( cudaMalloc( (void**) &pDataInD_Raw, p->iViewDataSize )); assert(pDataInD_Raw); // biggest last CUDA_SAFE_CALL( cudaMemcpy(pDataInD_Raw, &gpDataIn_Raw[p->iViewDataOffsetInFloats], p->iViewDataSize, cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy(pDataInD_Index, gpDataIn_Index, DATASIZE_IN_INDEX, cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemcpy(pDataInD_Bounds, p->pDataIn_Bounds, p->iDataSizeBounds, cudaMemcpyHostToDevice )); CUDA_SAFE_CALL( cudaMemset(pDataInD_State, 0, DATASIZE_IN_STATE )); CUDA_SAFE_CALL( cudaMemset(pDataOutD_Atom, 0, DATASIZE_OUT_ATOM )); HANDLE_ERROR("memset") // prepare device environment dim3 block_size; dim3 grid_size; grid_size.x = p->iNumThreadBlocks / GRIDHEIGHT; grid_size.y = GRIDHEIGHT; grid_size.z = 1; block_size.x = kThreadBlockSize; block_size.y = 1; block_size.z = 1; unsigned int mem_shared = 0; // this is for dynamic alloc of shared mem, we alloc statically unsigned int iMyAtoms[2]; assert(DATASIZE_OUT_ATOM == sizeof(iMyAtoms)); iMyAtoms[0] = 0; iMyAtoms[1] = 0; #define kMaxGridSize (63*1024) assert(grid_size.x <= kMaxGridSize && "grid_size.x larger than supported (cudaGetDeviceProperties:maxGridSize: 63k currently)"); printf("run_:%d,%d\n",iPartID,iGPUId); bool bPrintFirst = true; // precalc params as much as possible, i think they are only needed once for the whole program, not for every thread do { // run kernel mykernel<<<grid_size, block_size, mem_shared >>>( pDataInD_Raw, pDataInD_Index, pDataInD_State, pDataInD_Bounds, pDataOutD_Atom, pDataOutD_List, p->fEpsilon, p->fEpsilon*p->fEpsilon, p->iElementOffsetInPoints, p->iViewDataOffsetInPoints); //~ CUT_SAFE_CALL( cudaThreadSynchronize()); // kernel start is async, so wait for finish here //~ HANDLE_ERROR("threadsync") // copy result from device to host //~ CUDA_SAFE_CALL( cudaMemcpy(pDataInDebug_State, pDataInD_State, DATASIZE_IN_STATE, cudaMemcpyDeviceToHost )); 
HANDLE_ERROR("cudaMemcpy state") CUDA_SAFE_CALL( cudaMemcpy(iMyAtoms, pDataOutD_Atom, DATASIZE_OUT_ATOM, cudaMemcpyDeviceToHost )); HANDLE_ERROR("cudaMemcpy atom") // get counter CUDA_SAFE_CALL( cudaMemset(pDataOutD_Atom, 0, DATASIZE_OUT_ATOM )); HANDLE_ERROR("cudaMemset atom") // clear counter int iNumResults = min(kMaxResults,iMyAtoms[0]); //~ int iNumResults = min(kMaxResults,p->pDataOut_Atom[0]); //~ printf("datapart %d : atom[0]=%d atom[1]=%d iNumResults=%d kMaxResults=%d\n",p->iPartID,p->pDataOut_Atom[0],p->pDataOut_Atom[1],iNumResults,(int)kMaxResults); //~ printf("datapart %d : atom[0]=%d atom[1]=%d iNumResults=%d kMaxResults=%d\n",p->iPartID,iMyAtoms[0],iMyAtoms[1],iNumResults,(int)kMaxResults); #ifdef ENABLE_GPU_WRITEOUTLIST int iListSize = iNumResults * sizeof(ulong2); CUDA_SAFE_CALL( cudaMemcpy(p->pDataOut_List, pDataOutD_List, iListSize, cudaMemcpyDeviceToHost )); // get list #endif //~ printf("datapart %d : receive results from device\n",p->iPartID); p->iResultCount += iNumResults; // todo : do something with the data here, e.g. printf, or write to file ? //~ int b = grid_size.x*grid_size.y; //~ for (int i=0;i<grid_size.x*grid_size.y+4;++i) if (pDataInDebug_State[i].x == kStateEndValue) --b; //~ printf("[%d,%d,%d,%d]:%d\n",iPartID,a,iNumResults,(int)iMyAtoms[1],b); if (iMyAtoms[1] == 0 && iNumResults == 0) break; // detect end //~ if (--a == 0) break; if (bPrintFirst) { bPrintFirst = false; printf("run_firstdone:%d,%d\n",iPartID,iGPUId); } break; } while (1); printf("datapart %d : atom[0]=%d atom[1]=%d iNumResults=%d kMaxResults=%d\n",p->iPartID,iMyAtoms[0],iMyAtoms[1],p->iResultCount,(int)kMaxResults); // release device memory CUDA_SAFE_CALL(cudaFree(pDataInD_Raw)); CUDA_SAFE_CALL(cudaFree(pDataInD_Index)); CUDA_SAFE_CALL(cudaFree(pDataInD_State)); CUDA_SAFE_CALL(cudaFree(pDataOutD_Atom)); //~ CUDA_SAFE_CALL(cudaFree(pDataOutD_List)); CUT_THREADEND; } // ##### ##### ##### ##### ##### main int main( int argc, char** argv) { #ifdef ENABLE_MEASUREMENT_CPU_IDX #ifdef ENABLE_MEASUREMENT_CPU_NEST printf("don't enable ENABLE_MEASUREMENT_CPU_IDX and _NEST at the same time !\n"); exit(0); #endif #endif CUT_DEVICE_INIT(); const char* sCPU_CalcType = "none"; const char* sGPU_CalcType = "none"; int count = 0; CUDA_SAFE_CALL( cudaGetDeviceCount(&count)); assert(count >= kGPU_COUNT); float fEpsilon = TEST_EPSILON; // todo : read from arg ? 
float fSqEpsilon = fEpsilon*fEpsilon; float fTimeGPU = 1; float fTimeCPU = 1; int iResultsGPU = 0; int iResultsCPU = 0; // print device infos if (0) PrintDeviceInfos(); // size calc and alloc gpDataIn_Raw = (float*) malloc(DATASIZE_IN_RAW); gpDataIn_Index = (float*) malloc(DATASIZE_IN_INDEX); // print some infos printf("N=%d\n",N); printf("SX=%d\n",SX); printf("SY=%d\n",SY); printf("SZ=%d\n",SZ); printf("I0=%d\n",I0); printf("DATASIZE_IN_RAW=%dkb\n", DATASIZE_IN_RAW /1024); printf("DATASIZE_IN_STATE=%dkb\n", DATASIZE_IN_STATE /1024); printf("DATASIZE_IN_INDEX=%dkb\n", DATASIZE_IN_INDEX /1024); printf("DATASIZE_IN_TOTAL=%dkb\n", DATASIZE_IN_TOTAL /1024); printf("DATASIZE_OUT_TOTAL=%dkb\n", DATASIZE_OUT_TOTAL /1024); // read file RobStartTimer(); ReadTextData(kDataFilePath,gpDataIn_Raw); // read raw data from file printf("%4.2f sec : reading data from file\n",RobStopTimer()); // generate index RobStartTimer(); IndexStructure_Generate(gpDataIn_Raw,gpDataIn_Index); // gen index and sort raw data printf("%4.2f sec : generating index data\n",RobStopTimer()); #ifdef ENABLE_MEASUREMENT_GPU if (1) { int threadIds[kGPU_COUNT]; int iPartID,iGPUIndex; int iPartCount = 0; CUTThread* threads = (CUTThread *)malloc(sizeof(CUTThread) * kGPU_COUNT); RobStartTimer();printf("starting gpu...\n"); #ifdef GPU_NESTED_LOOP sGPU_CalcType = "nest"; #else sGPU_CalcType = "idx"; #endif // determine how the data will be subdivided bool bOK = false; for (iPartCount=kGPU_COUNT;iPartCount <= kMaxPartCount;iPartCount*=2) { bOK = true; for (iPartID=0;iPartID<iPartCount;++iPartID) if (!cDataPart_Prepare(&gDataParts[iPartID],iPartID,iPartCount,fEpsilon)) { bOK = false; break; } if (bOK) break; } if (!bOK) { printf("failed to find a partitioning that fits in vram, try lowering epsilon, or increase I0\n"); exit(1); } for (iPartID=0;iPartID<iPartCount;++iPartID) cDataPart_Alloc(&gDataParts[iPartID]); //~ for (int j=0;j<I0/2;++j) printf("%d:%+0.1f %d:%+0.1f\n",j,GET_MIN_0(j),j+I0/2,GET_MIN_0(j+I0/2)); #ifdef GPU_NESTED_LOOP assert(iPartCount == 1 && "nested loop on gpu must currently be on a single GPU"); #endif // start threads for (iPartID=0;iPartID<iPartCount;++iPartID) { iGPUIndex = iPartID % kGPU_COUNT; threadIds[iGPUIndex] = iPartID; gpuThread(&threadIds[iGPUIndex]); //~ threads[iGPUIndex] = cutStartThread((CUT_THREADROUTINE)gpuThread, (void *)&threadIds[iGPUIndex]); //~ if (iGPUIndex == kGPU_COUNT-1) cutWaitForThreads(threads, kGPU_COUNT); // Wait for all the threads to finish. 
} iResultsGPU = 0; for (iPartID=0;iPartID<iPartCount;++iPartID) iResultsGPU += gDataParts[iPartID].iResultCount; for (iPartID=0;iPartID<iPartCount;++iPartID) cDataPart_Free(&gDataParts[iPartID]); free(threads); printf("all threads finished\n"); fTimeGPU = RobStopTimer(); printf("%4.2f sec : gpu\n",fTimeGPU); } #endif #ifdef ENABLE_MEASUREMENT_CPU_NEST if (1) { // NestedLoop RobStartTimer(); printf("check : NestedLoop...\n"); iResultsCPU = NestedLoop(gpDataIn_Raw,fSqEpsilon); fTimeCPU = RobStopTimer(); printf("%4.2f sec : check : NestedLoop\n",fTimeCPU); sCPU_CalcType = "nest"; } #endif #ifdef ENABLE_MEASUREMENT_CPU_IDX if (1) { // Idx_CPU RobStartTimer(); printf("check : with index on cpu...\n"); iResultsCPU = Idx_CPU(gpDataIn_Raw,gpDataIn_Index,fEpsilon,fSqEpsilon); fTimeCPU = RobStopTimer(); printf("%4.2f sec : check : with index on cpu\n",fTimeCPU); sCPU_CalcType = "idx"; } #endif // time-text char myTimeText[256] = ""; time_t mytime; time(&mytime); strftime(myTimeText,255,"%Y.%m.%d_%H.%M.%S",localtime(&mytime)); if (1) { // write report FILE* fp = fopen(kReportFile,"a"); if (fp) { int iGPU_IDX3 = 0; int iCPU_IDX3 = 0; #ifdef ENABLE_GPU_IDX3 iGPU_IDX3 = 1; #endif #ifdef ENABLE_CPU_IDX3 iCPU_IDX3 = 1; #endif #ifdef DATA_PURE_RANDOM const char* szDataType = "unirand"; #else #ifdef DATA_CLUST_RANDOM const char* szDataType = "clustrand"; #else const char* szDataType = "file"; #endif #endif float fTolerance = 0.01; // 1 % tolerance due to less exact float fRelError = float(abs(iResultsCPU - iResultsGPU)) / float( (iResultsCPU > 0) ? iResultsCPU : 1 ); bool bOK = fRelError < fTolerance; // error relative to "correct" cpu results... float fErrorPercent = ceil(fRelError * 1000.0) * 0.1; #define REPORT "%s gpu/cpu=%8.1f %s err<=%0.1f%% N=%d size=%dMB %s I0=%d i3:%d,%d e=%0.1f d=%d tgpu(%s)=%0.1fs=%0.1fm=%0.1fh tcpu(%s)=%0.1f res=%d/%d\n", \ myTimeText,fTimeGPU/fTimeCPU,bOK?" ok ":"MISS", fErrorPercent, \ N,DATASIZE_IN_RAW/MB,szDataType,I0,iGPU_IDX3,iCPU_IDX3, \ (float)TEST_EPSILON,(int)D,sGPU_CalcType,fTimeGPU,fTimeGPU/60,fTimeGPU/3600,sCPU_CalcType,fTimeCPU, iResultsCPU,iResultsGPU printf( REPORT ); fprintf( fp, REPORT ); fclose(fp); } } if (0) { // write sample data char mySampleDataPath[256] = ""; sprintf(mySampleDataPath,"sampledata/sampledata_%s_%dMB.txt",myTimeText,(int)DATASIZE_IN_RAW/MB); FILE* fp = fopen(mySampleDataPath,"a"); if (fp) { fprintf(fp,"START e=%f rcpu=%d tcpu=%f\n",fEpsilon,iResultsCPU,fTimeCPU); for (int i=0;i<N;++i) { for (int d=0;d<D;++d) fprintf(fp,"%f,",gpDataIn_Raw[i*D + d]); fprintf(fp,"\n"); } fclose(fp); } } // release memory free(gpDataIn_Raw); free(gpDataIn_Index); CUT_EXIT(argc, argv); return 0; }
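The similarity-join code above collects result pairs through a device-side atomic counter (pDataOutD_Atom) and a bounded list (pDataOutD_List). The sketch below shows that output pattern in isolation: a slot is reserved with atomicAdd and writes past the capacity are dropped. The names emitPair, counter, list and capacity are illustrative, not taken from the file.

__device__ void emitPair(unsigned int i, unsigned int j,
                         unsigned int* counter, ulong2* list, unsigned int capacity)
{
    unsigned int slot = atomicAdd(counter, 1u);   // total number of pairs found so far
    if (slot < capacity) {                        // keep only what fits in the result list
        list[slot].x = i;
        list[slot].y = j;
    }
}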
cf470c81f110f841315a55b85fa1781b25b1221d.hip
// !!! This is a file automatically generated by hipify!!! /* * DeviceCuda.cu * * Created on: Apr 11, 2012 * Author: jbarbosa */ #include <hip/hip_runtime.h> #include <config/common.h> #include <gamalib/memlib/LowLevelMemAllocator.h> #include "DeviceCuda.h" #include <config/vtable.h> #include <config/vtable.cuh> #include <gamalib/utils/cuda_utils.cuh> #include <gamalib/GenericKernels/KernelCuda.cuh> #include <gamalib/utils/x86_utils.h> #define PostWork(W) outbox->enqueue(W); typedef unsigned char byte; __global__ //__launch_bounds__(256, 5) void genericKernel( Workqueue<work, INBOX_QUEUE_SIZE, GPU_CUDA>* INBOX) { unsigned long index = blockIdx.x * blockDim.x + threadIdx.x; work* w_item; if ((w_item = INBOX->data[index]) != NULL) { (w_item->*WORK_GPU_TABLE[w_item->getWorkTypeID()])(); } } __global__ //__launch_bounds__(256, 5) void genericWideKernel(work* w_item) { (w_item->*WORK_GPU_TABLE[w_item->getWorkTypeID()])(); } __inline__ void CudaTest(char *msg) { hipError_t e; if (hipSuccess != (e = hipGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", hipGetErrorString(e)); exit(-1); } } void DeviceCuda::classWideKernel(work* witem, unsigned st) { hipSetDevice(cudaDeviceID); #if defined GAMA_CACHE std::vector<pointerInfo>* Lw = (witem->*TOCACHEW_CPU_TABLE[witem->getWorkTypeID()])(); std::vector<pointerInfo>* Lr = (witem->*TOCACHER_CPU_TABLE[witem->getWorkTypeID()])(); for(int i=0; i< Lr->size(); i++) { cache->cachePtr((*Lr)[i].ptr,(*Lr)[i].lenght,&streams[st]); } #endif if (!IS_LIB_CALL(witem->WORK_TYPE_ID)) { hipLaunchKernelGGL(( genericWideKernel), dim3(cdp.multiProcessorCount * 5), dim3(256), 0, streams[st], witem); CudaTest("Launching parallel wide kernel"); } else { (witem->*WORK_GPU_LIB_TABLE[witem->getWorkTypeID()])(); } CudaTest("Launching parallel wide kernel"); #if defined GAMA_CACHE delete Lw; delete Lr; #endif }
cf470c81f110f841315a55b85fa1781b25b1221d.cu
/* * DeviceCuda.cu * * Created on: Apr 11, 2012 * Author: jbarbosa */ #include <cuda.h> #include <config/common.h> #include <gamalib/memlib/LowLevelMemAllocator.h> #include "DeviceCuda.h" #include <config/vtable.h> #include <config/vtable.cuh> #include <gamalib/utils/cuda_utils.cuh> #include <gamalib/GenericKernels/KernelCuda.cuh> #include <gamalib/utils/x86_utils.h> #define PostWork(W) outbox->enqueue(W); typedef unsigned char byte; __global__ //__launch_bounds__(256, 5) void genericKernel( Workqueue<work, INBOX_QUEUE_SIZE, GPU_CUDA>* INBOX) { unsigned long index = blockIdx.x * blockDim.x + threadIdx.x; work* w_item; if ((w_item = INBOX->data[index]) != NULL) { (w_item->*WORK_GPU_TABLE[w_item->getWorkTypeID()])(); } } __global__ //__launch_bounds__(256, 5) void genericWideKernel(work* w_item) { (w_item->*WORK_GPU_TABLE[w_item->getWorkTypeID()])(); } __inline__ void CudaTest(char *msg) { cudaError_t e; if (cudaSuccess != (e = cudaGetLastError())) { fprintf(stderr, "%s: %d\n", msg, e); fprintf(stderr, "%s\n", cudaGetErrorString(e)); exit(-1); } } void DeviceCuda::classWideKernel(work* witem, unsigned st) { cudaSetDevice(cudaDeviceID); #if defined GAMA_CACHE std::vector<pointerInfo>* Lw = (witem->*TOCACHEW_CPU_TABLE[witem->getWorkTypeID()])(); std::vector<pointerInfo>* Lr = (witem->*TOCACHER_CPU_TABLE[witem->getWorkTypeID()])(); for(int i=0; i< Lr->size(); i++) { cache->cachePtr((*Lr)[i].ptr,(*Lr)[i].lenght,&streams[st]); } #endif if (!IS_LIB_CALL(witem->WORK_TYPE_ID)) { genericWideKernel<<< cdp.multiProcessorCount * 5, 256, 0, streams[st]>>>(witem); CudaTest("Launching parallel wide kernel"); } else { (witem->*WORK_GPU_LIB_TABLE[witem->getWorkTypeID()])(); } CudaTest("Launching parallel wide kernel"); #if defined GAMA_CACHE delete Lw; delete Lr; #endif }
aad62bb551cbfbaced91b7b7b267eedcb4a6d19a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdint.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; // Sequential radix sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} void sortByHost(const uint32_t * in, int n, uint32_t * out, int nBits) { int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * sizeof(int)); int * histScan = (int *)malloc(nBins * sizeof(int)); // In each counting sort, we sort data in "src" and write result to "dst" // Then, we swap these 2 pointers and go to the next counting sort // At first, we assign "src = in" and "dest = out" // However, the data pointed by "in" is read-only // --> we create a copy of this data and assign "src" to the address of this copy uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit) // (Each digit consists of nBits bits) // In each loop, sort elements according to the current digit // (using STABLE counting sort) for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist,0,nBins*sizeof(int)); for(int i=0;i<n;i++) { int bin=(src[i]>>bit)&(nBins-1); hist[bin]++; } // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0]=0; for(int bin=1;bin<nBins;bin++) { histScan[bin]=histScan[bin-1]+hist[bin-1]; } // TODO: From "histScan", scatter elements in "src" to correct locations in "dst" for(int i=0;i<n;i++) { int bin=(src[i]>>bit)&(nBins-1); dst[histScan[bin]]=src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t * tmp=src; src=dst; dst=tmp; } // TODO: Copy result to "out" memcpy(out,src,n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); } /* Use SMEM. 
*/
__global__ void computeHistKernel2(int * in, int n, int * hist, int nBins, int bit)
{
    // TODO
    // Each block computes its local hist using atomic on SMEM
    extern __shared__ int s_hist[];
    int i=blockDim.x*blockIdx.x+threadIdx.x;

    // Initialize the local hist to 0
    // If nBins > blockDim.x, each thread clears the bins spaced stride=blockDim.x apart
    for(int stride=0;stride<nBins;stride+=blockDim.x)
        if(threadIdx.x+stride<nBins)
            s_hist[threadIdx.x+stride]=0;
    __syncthreads();// syncthreads to make sure every element of s_hist has been set to 0

    // Compute the local hist
    if(i<n)
    {
        int bin=(in[i]>>bit)&(nBins-1);// extract nBits to determine which bin this element belongs to
        atomicAdd(&s_hist[bin], 1);
    }
    __syncthreads();// syncthreads to make sure all elements handled by this block have been counted into s_hist

    // Each block adds its local hist to global hist using atomic on GMEM
    for(int stride=0;stride<nBins;stride+=blockDim.x)
        if(threadIdx.x+stride<nBins)
            atomicAdd(&hist[threadIdx.x+stride],s_hist[threadIdx.x+stride]);
}

// TODO: You can define necessary functions here
// Add the block sum (blkSum) to the corresponding elements
__global__ void addBlkKernel(int * in, int n, int * blkSums)
{
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<n&&blockIdx.x>0)
        in[i]+=blkSums[blockIdx.x-1];
}

__global__ void scatter(int * in, int * out, int *inScan, int n, int nZeros)
{
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<n)
    {
        if (in[i]==0)
            out[i - inScan[i]] = in[i];
        else
            out[nZeros + inScan[i]] = in[i];
    }
}

__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
    // TODO
    extern __shared__ int s_in[];
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    // load the corresponding value into SMEM
    if(i<n)
        s_in[threadIdx.x]=in[i];
    else
        s_in[threadIdx.x]=0;
    __syncthreads();
    // add together values that are stride steps apart
    for(int stride=1;stride<blockDim.x;stride*=2)
    {
        int temp=0;
        if(threadIdx.x>=stride)
        {
            temp=s_in[threadIdx.x-stride];// read the element stride steps back
        }
        __syncthreads();// make sure the value stride steps back has been read into a register
        if(threadIdx.x>=stride )
        {
            s_in[threadIdx.x]+=temp;
        }
        __syncthreads();// make sure all additions are done
    }
    // write the corresponding value to the out array
    if(i<n)
        out[i]=s_in[threadIdx.x];
    // the last thread in each block writes its value into blkSums at blockIdx
    if(blkSums!=NULL)
    {
        if(threadIdx.x==blockDim.x-1)
        {
            blkSums[blockIdx.x]=s_in[threadIdx.x];
        }
    }
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    int * inScan = (int *)malloc(n * sizeof(int));
    int * d_in,*d_inScan,*d_blkSums,*d_out;
    dim3 blockSize1(blockSizes[0]); dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 blockSize2(blockSizes[1]); dim3 gridSize2((n - 1) / blockSize2.x + 1);
    CHECK(hipMalloc(&d_in, n * sizeof(int)));
    CHECK(hipMalloc(&d_inScan,n*sizeof(int)));
    CHECK(hipMalloc(&d_blkSums,gridSize2.x*sizeof(int)));
    CHECK(hipMalloc(&d_out, n * sizeof(int)));
    CHECK(hipMemcpy(d_in, in, n * sizeof(int), hipMemcpyHostToDevice));
    int* blkSums = (int *)malloc(gridSize2.x*sizeof(int));

    // each block needs blockSize2.x ints of dynamic shared memory for its local scan
    hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize2),dim3(blockSize2),blockSize2.x*sizeof(int), 0, d_in,n,d_inScan,d_blkSums);
    // catch kernel launch/execution errors
    hipError_t errSync = hipGetLastError();
    hipError_t errAsync = hipDeviceSynchronize();
    if (errSync != hipSuccess) { printf("Sync kernel error 1: %s\n", hipGetErrorString(errSync)); return; }
    if (errAsync != hipSuccess) { printf("Async kernel error 1: %s\n", hipGetErrorString(errAsync)); return; }

    CHECK(hipMemcpy(blkSums,d_blkSums,gridSize2.x*sizeof(int),hipMemcpyDeviceToHost));
    for(int i=1;i<gridSize2.x;i++)
    {
        blkSums[i]+=blkSums[i-1];
    }
    CHECK(hipMemcpy(d_blkSums,blkSums,gridSize2.x*sizeof(int),hipMemcpyHostToDevice));

    hipLaunchKernelGGL(( addBlkKernel), dim3(gridSize2),dim3(blockSize2), 0, 0, d_inScan,n,d_blkSums);
    // catch kernel launch/execution errors
    errSync = hipGetLastError();
    errAsync = hipDeviceSynchronize();
    if (errSync != hipSuccess) { printf("Sync kernel error 2: %s\n", hipGetErrorString(errSync)); return; }
    if (errAsync != hipSuccess) { printf("Async kernel error 2: %s\n", hipGetErrorString(errAsync)); return; }

    // copy back shifted by one element to turn the inclusive scan into an exclusive one
    CHECK(hipMemcpy(&inScan[1],d_inScan,(n-1)*sizeof(int),hipMemcpyDeviceToHost));
    inScan[0]=0;
    CHECK(hipMemcpy(d_inScan,inScan,n*sizeof(int),hipMemcpyHostToDevice));
    inScan[0]=0;
    int nZeros = n - inScan[n-1] - in[n-1];

    hipLaunchKernelGGL(( scatter), dim3(gridSize2),dim3(blockSize2), 0, 0, d_in,d_out,d_inScan,n,nZeros);
    // catch kernel launch/execution errors
    errSync = hipGetLastError();
    errAsync = hipDeviceSynchronize();
    if (errSync != hipSuccess) { printf("Sync kernel error 3: %s\n", hipGetErrorString(errSync)); return; }
    if (errAsync != hipSuccess) { printf("Async kernel error 3: %s\n", hipGetErrorString(errAsync)); return; }

    CHECK(hipMemcpy(out,d_out,n*sizeof(int),hipMemcpyDeviceToHost));
    free(inScan);
    free(blkSums);
    CHECK(hipFree(d_in)); CHECK(hipFree(d_inScan));
    CHECK(hipFree(d_blkSums)); CHECK(hipFree(d_out));
}

// Radix sort
void sort(const uint32_t * in, int n, uint32_t * out, int nBits,
        bool useDevice=false, int * blockSizes=NULL)
{
    GpuTimer timer;
    timer.Start();
    if (useDevice == false)
    {
        printf("\nRadix sort by host\n");
        sortByHost(in, n, out, nBits);
    }
    else // use device
    {
        printf("\nRadix sort by device\n");
        sortByDevice(in, n, out, nBits, blockSizes);
    }
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
}

void printDeviceInfo()
{
    hipDeviceProp_t devProv;
    CHECK(hipGetDeviceProperties(&devProv, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", devProv.name);
    printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
    printf("Num SMs: %d\n", devProv.multiProcessorCount);
    printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
    printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
    printf("SMEM per 
SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; // n = 1000000; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out = (uint32_t *)malloc(bytes); // Device result uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand()%2; // printArray(in, n); // SET UP NBITS int nBits = 1; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits); //printArray(correctOut, n); // SORT BY DEVICE sort(in, n, out, nBits, true, blockSizes); //printArray(out,n); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
aad62bb551cbfbaced91b7b7b267eedcb4a6d19a.cu
#include <stdio.h> #include <stdint.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; // Sequential radix sort // Assume: nBits (k in slides) in {1, 2, 4, 8, 16} void sortByHost(const uint32_t * in, int n, uint32_t * out, int nBits) { int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * sizeof(int)); int * histScan = (int *)malloc(nBins * sizeof(int)); // In each counting sort, we sort data in "src" and write result to "dst" // Then, we swap these 2 pointers and go to the next counting sort // At first, we assign "src = in" and "dest = out" // However, the data pointed by "in" is read-only // --> we create a copy of this data and assign "src" to the address of this copy uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit) // (Each digit consists of nBits bits) // In each loop, sort elements according to the current digit // (using STABLE counting sort) for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist,0,nBins*sizeof(int)); for(int i=0;i<n;i++) { int bin=(src[i]>>bit)&(nBins-1); hist[bin]++; } // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0]=0; for(int bin=1;bin<nBins;bin++) { histScan[bin]=histScan[bin-1]+hist[bin-1]; } // TODO: From "histScan", scatter elements in "src" to correct locations in "dst" for(int i=0;i<n;i++) { int bin=(src[i]>>bit)&(nBins-1); dst[histScan[bin]]=src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t * tmp=src; src=dst; dst=tmp; } // TODO: Copy result to "out" memcpy(out,src,n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); } /* Use SMEM. 
*/
__global__ void computeHistKernel2(int * in, int n, int * hist, int nBins, int bit)
{
    // TODO
    // Each block computes its local hist using atomic on SMEM
    extern __shared__ int s_hist[];
    int i=blockDim.x*blockIdx.x+threadIdx.x;

    // Initialize the local hist to 0
    // If nBins > blockDim.x, each thread clears the bins spaced stride=blockDim.x apart
    for(int stride=0;stride<nBins;stride+=blockDim.x)
        if(threadIdx.x+stride<nBins)
            s_hist[threadIdx.x+stride]=0;
    __syncthreads();// syncthreads to make sure every element of s_hist has been set to 0

    // Compute the local hist
    if(i<n)
    {
        int bin=(in[i]>>bit)&(nBins-1);// extract nBits to determine which bin this element belongs to
        atomicAdd(&s_hist[bin], 1);
    }
    __syncthreads();// syncthreads to make sure all elements handled by this block have been counted into s_hist

    // Each block adds its local hist to global hist using atomic on GMEM
    for(int stride=0;stride<nBins;stride+=blockDim.x)
        if(threadIdx.x+stride<nBins)
            atomicAdd(&hist[threadIdx.x+stride],s_hist[threadIdx.x+stride]);
}

// TODO: You can define necessary functions here
// Add the block sum (blkSum) to the corresponding elements
__global__ void addBlkKernel(int * in, int n, int * blkSums)
{
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<n&&blockIdx.x>0)
        in[i]+=blkSums[blockIdx.x-1];
}

__global__ void scatter(int * in, int * out, int *inScan, int n, int nZeros)
{
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    if(i<n)
    {
        if (in[i]==0)
            out[i - inScan[i]] = in[i];
        else
            out[nZeros + inScan[i]] = in[i];
    }
}

__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
    // TODO
    extern __shared__ int s_in[];
    int i=blockDim.x*blockIdx.x+threadIdx.x;
    // load the corresponding value into SMEM
    if(i<n)
        s_in[threadIdx.x]=in[i];
    else
        s_in[threadIdx.x]=0;
    __syncthreads();
    // add together values that are stride steps apart
    for(int stride=1;stride<blockDim.x;stride*=2)
    {
        int temp=0;
        if(threadIdx.x>=stride)
        {
            temp=s_in[threadIdx.x-stride];// read the element stride steps back
        }
        __syncthreads();// make sure the value stride steps back has been read into a register
        if(threadIdx.x>=stride )
        {
            s_in[threadIdx.x]+=temp;
        }
        __syncthreads();// make sure all additions are done
    }
    // write the corresponding value to the out array
    if(i<n)
        out[i]=s_in[threadIdx.x];
    // the last thread in each block writes its value into blkSums at blockIdx
    if(blkSums!=NULL)
    {
        if(threadIdx.x==blockDim.x-1)
        {
            blkSums[blockIdx.x]=s_in[threadIdx.x];
        }
    }
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for different kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    int * inScan = (int *)malloc(n * sizeof(int));
    int * d_in,*d_inScan,*d_blkSums,*d_out;
    dim3 blockSize1(blockSizes[0]); dim3 gridSize1((n - 1) / blockSize1.x + 1);
    dim3 blockSize2(blockSizes[1]); dim3 gridSize2((n - 1) / blockSize2.x + 1);
    CHECK(cudaMalloc(&d_in, n * sizeof(int)));
    CHECK(cudaMalloc(&d_inScan,n*sizeof(int)));
    CHECK(cudaMalloc(&d_blkSums,gridSize2.x*sizeof(int)));
    CHECK(cudaMalloc(&d_out, n * sizeof(int)));
    CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice));
    int* blkSums = (int *)malloc(gridSize2.x*sizeof(int));

    // each block needs blockSize2.x ints of dynamic shared memory for its local scan
    scanBlkKernel<<<gridSize2,blockSize2,blockSize2.x*sizeof(int)>>>(d_in,n,d_inScan,d_blkSums);
    // catch kernel launch/execution errors
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess) { printf("Sync kernel error 1: %s\n", cudaGetErrorString(errSync)); return; }
    if (errAsync != cudaSuccess) { printf("Async kernel error 1: %s\n", cudaGetErrorString(errAsync)); return; }

    CHECK(cudaMemcpy(blkSums,d_blkSums,gridSize2.x*sizeof(int),cudaMemcpyDeviceToHost));
    for(int i=1;i<gridSize2.x;i++)
    {
        blkSums[i]+=blkSums[i-1];
    }
    CHECK(cudaMemcpy(d_blkSums,blkSums,gridSize2.x*sizeof(int),cudaMemcpyHostToDevice));

    addBlkKernel<<<gridSize2,blockSize2>>>(d_inScan,n,d_blkSums);
    // catch kernel launch/execution errors
    errSync = cudaGetLastError();
    errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess) { printf("Sync kernel error 2: %s\n", cudaGetErrorString(errSync)); return; }
    if (errAsync != cudaSuccess) { printf("Async kernel error 2: %s\n", cudaGetErrorString(errAsync)); return; }

    // copy back shifted by one element to turn the inclusive scan into an exclusive one
    CHECK(cudaMemcpy(&inScan[1],d_inScan,(n-1)*sizeof(int),cudaMemcpyDeviceToHost));
    inScan[0]=0;
    CHECK(cudaMemcpy(d_inScan,inScan,n*sizeof(int),cudaMemcpyHostToDevice));
    inScan[0]=0;
    int nZeros = n - inScan[n-1] - in[n-1];

    scatter<<<gridSize2,blockSize2>>>(d_in,d_out,d_inScan,n,nZeros);
    // catch kernel launch/execution errors
    errSync = cudaGetLastError();
    errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess) { printf("Sync kernel error 3: %s\n", cudaGetErrorString(errSync)); return; }
    if (errAsync != cudaSuccess) { printf("Async kernel error 3: %s\n", cudaGetErrorString(errAsync)); return; }

    CHECK(cudaMemcpy(out,d_out,n*sizeof(int),cudaMemcpyDeviceToHost));
    free(inScan);
    free(blkSums);
    CHECK(cudaFree(d_in)); CHECK(cudaFree(d_inScan));
    CHECK(cudaFree(d_blkSums)); CHECK(cudaFree(d_out));
}

// Radix sort
void sort(const uint32_t * in, int n, uint32_t * out, int nBits,
        bool useDevice=false, int * blockSizes=NULL)
{
    GpuTimer timer;
    timer.Start();
    if (useDevice == false)
    {
        printf("\nRadix sort by host\n");
        sortByHost(in, n, out, nBits);
    }
    else // use device
    {
        printf("\nRadix sort by device\n");
        sortByDevice(in, n, out, nBits, blockSizes);
    }
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
}

void printDeviceInfo()
{
    cudaDeviceProp devProv;
    CHECK(cudaGetDeviceProperties(&devProv, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", devProv.name);
    printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
    printf("Num SMs: %d\n", devProv.multiProcessorCount);
    printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
    printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
    printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; // n = 1000000; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out = (uint32_t *)malloc(bytes); // Device result uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand()%2; // printArray(in, n); // SET UP NBITS int nBits = 1; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits); //printArray(correctOut, n); // SORT BY DEVICE sort(in, n, out, nBits, true, blockSizes); //printArray(out,n); checkCorrectness(out, correctOut, n); // FREE MEMORIES free(in); free(out); free(correctOut); return EXIT_SUCCESS; }
f28b47f88a1ae6658b2088833314968f2b279fef.hip
// !!! This is a file automatically generated by hipify!!! #include "figures.h" #include "figures.h" #include "hip/hip_runtime_api.h" #include "../cuda_numerics/float3.h" #include "stdio.h" FUNC_PREF float3 Ray::point(float t) { return org + dir*t; } //----------------------------------Constructors----------------------------------------- FUNC_PREF Ray::Ray() { tmin = 0.0f; tmax = INFINITY; } FUNC_PREF Ray::Ray(float3 start, float3 dir) { tmin = 0.0f; tmax = INFINITY; this->org = start; this->dir = dir; } FUNC_PREF Ray::Ray(float3 start, float3 dir, float tmin) { this->org = start; this->dir = dir; this->tmin = tmin; this->tmax = INFINITY; } FUNC_PREF Ray::Ray(float3 start, float3 dir, float tmin, float tmax) { this->org = start; this->dir = dir; this->tmin = tmin; this->tmax = tmax; } FUNC_PREF Ray make_ray(float3 org, float3 dir) { Ray new_ray; new_ray.org = org; new_ray.dir = dir; new_ray.tmin = 0; new_ray.tmax = INFINITY; return new_ray; } FUNC_PREF Ray make_ray(float3 org, float3 dir, float tmin) { Ray new_ray; new_ray.org = org; new_ray.dir = dir; new_ray.tmin = tmin; new_ray.tmax = INFINITY; return new_ray; } FUNC_PREF Ray make_ray(float3 org, float3 dir, float tmin, float tmax) { Ray new_ray; new_ray.org = org; new_ray.dir = dir; new_ray.tmin = tmin; new_ray.tmax = tmax; return new_ray; } FUNC_PREF void Ray::set(float3 org, float3 dir, float tmin, float tmax) { this->org = org; this->dir = dir; this->tmin = tmin; this->tmax = tmax; } FUNC_PREF void Ray::set(float3 org, float3 dir, float tmin) { this->org = org; this->dir = dir; this->tmin = tmin; this->tmax = INFINITY; } FUNC_PREF void Ray::set(float3 org, float3 dir) { this->org = org; this->dir = dir; this->tmin = 0; this->tmax = INFINITY; } FUNC_PREF void Ray::print() const { printf("-- Ray: <org, dir> = <{%f, %f, %f}, {%f, %f, %f}>\n", org.x, org.y, org.z, dir.x, dir.y, dir.z); }
f28b47f88a1ae6658b2088833314968f2b279fef.cu
#include "figures.h" #include "figures.h" #include "cuda_runtime_api.h" #include "../cuda_numerics/float3.h" #include "stdio.h" FUNC_PREF float3 Ray::point(float t) { return org + dir*t; } //----------------------------------Constructors----------------------------------------- FUNC_PREF Ray::Ray() { tmin = 0.0f; tmax = INFINITY; } FUNC_PREF Ray::Ray(float3 start, float3 dir) { tmin = 0.0f; tmax = INFINITY; this->org = start; this->dir = dir; } FUNC_PREF Ray::Ray(float3 start, float3 dir, float tmin) { this->org = start; this->dir = dir; this->tmin = tmin; this->tmax = INFINITY; } FUNC_PREF Ray::Ray(float3 start, float3 dir, float tmin, float tmax) { this->org = start; this->dir = dir; this->tmin = tmin; this->tmax = tmax; } FUNC_PREF Ray make_ray(float3 org, float3 dir) { Ray new_ray; new_ray.org = org; new_ray.dir = dir; new_ray.tmin = 0; new_ray.tmax = INFINITY; return new_ray; } FUNC_PREF Ray make_ray(float3 org, float3 dir, float tmin) { Ray new_ray; new_ray.org = org; new_ray.dir = dir; new_ray.tmin = tmin; new_ray.tmax = INFINITY; return new_ray; } FUNC_PREF Ray make_ray(float3 org, float3 dir, float tmin, float tmax) { Ray new_ray; new_ray.org = org; new_ray.dir = dir; new_ray.tmin = tmin; new_ray.tmax = tmax; return new_ray; } FUNC_PREF void Ray::set(float3 org, float3 dir, float tmin, float tmax) { this->org = org; this->dir = dir; this->tmin = tmin; this->tmax = tmax; } FUNC_PREF void Ray::set(float3 org, float3 dir, float tmin) { this->org = org; this->dir = dir; this->tmin = tmin; this->tmax = INFINITY; } FUNC_PREF void Ray::set(float3 org, float3 dir) { this->org = org; this->dir = dir; this->tmin = 0; this->tmax = INFINITY; } FUNC_PREF void Ray::print() const { printf("-- Ray: <org, dir> = <{%f, %f, %f}, {%f, %f, %f}>\n", org.x, org.y, org.z, dir.x, dir.y, dir.z); }
Algorithms.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Algorithms.h" #include "Algorithms.cuh" #include "ExpManager.h" #include "ThreefryGPU.h" #include "GPUDna.cuh" #include <cstdint> #include <stdio.h> #include <unistd.h> #include <iostream> #include<cuda.h> #include<hip/hip_runtime_api.h> using namespace std; #define DEBUG 1 // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } constexpr int32_t PROMOTER_ARRAY_SIZE = 10000; void transfer_in(ExpManager* exp_m, bool first_gen) { exp_m->rng_->initDevice(); std::vector<size_t> host_dna_size(exp_m->nb_indivs_); std::vector<size_t> host_dna_offset(exp_m->nb_indivs_); // Compute sizes: // * global_dna_size // * host_dna_offset[] // * host_max_dna_size // * host_dna_size[] global_dna_size = 0; for (int i = 0; i < exp_m->nb_indivs_; i++) { host_dna_offset[i] = global_dna_size; global_dna_size += exp_m->internal_organisms_[i]->dna_->seq_.size(); host_max_dna_size = host_max_dna_size < exp_m->internal_organisms_[i]->dna_->seq_.size() ? exp_m->internal_organisms_[i]->dna_->seq_.size() : host_max_dna_size; host_dna_size[i] = exp_m->internal_organisms_[i]->dna_->seq_.size(); } // Create shorthands auto seq0 = exp_m->internal_organisms_[0]->dna_->seq_.data(); auto len0 = exp_m->internal_organisms_[0]->dna_->seq_.size(); allocated_global_dna_size = global_dna_size*5; // Allocate mem for the meta dna checkCuda(hipMalloc((void **) &next_gen_dna, allocated_global_dna_size * sizeof(char))); checkCuda(hipMalloc((void**) &dna, allocated_global_dna_size * sizeof(char))); // Tranfer **the first** indiv's sequence checkCuda(hipMemcpy(dna, seq0, len0 * sizeof(char), hipMemcpyHostToDevice)); // Send dna_size array checkCuda(hipMalloc((void**) &dna_size, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(hipMemcpy(dna_size, host_dna_size.data(), exp_m->nb_indivs_ * sizeof(size_t), hipMemcpyHostToDevice)); checkCuda(hipMalloc((void **) &nb_mut_bp, 1 * sizeof(unsigned long long int))); checkCuda(hipMemset(nb_mut_bp, 0, 1 * sizeof(unsigned long long int))); // Launch kernel to clone initial genome into the whole pop int x_dim_size = (len0 / 128)+1; int y_dim_size = exp_m->nb_indivs_; dim3 dimGrid(x_dim_size,y_dim_size); hipLaunchKernelGGL(( clone_init_indiv), dim3(dimGrid),dim3(128), 0, 0, dna_size, dna); checkCuda(hipMalloc((void**) &dna_term, allocated_global_dna_size * sizeof(int8_t*))); checkCuda(hipMalloc((void**) &start_protein, allocated_global_dna_size * sizeof(int8_t*))); checkCuda(hipMalloc((void**) &dna_offset, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(hipMemcpy(dna_offset, host_dna_offset.data(), exp_m->nb_indivs_ * sizeof(size_t), hipMemcpyHostToDevice)); checkCuda(hipMalloc((void**) &next_gen_dna_offset, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(hipMalloc((void**) &next_gen_dna_size, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(hipMalloc((void**) &nb_mutations, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(hipMemset(nb_mutations, 0, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(hipMalloc((void**) &mutations_offset, exp_m->nb_indivs_ * sizeof(int))); checkCuda(hipMemset(mutations_offset, 0, exp_m->nb_indivs_ * sizeof(int))); checkCuda(hipMalloc((void**) &mutations_idx, 
exp_m->nb_indivs_ * sizeof(int))); checkCuda(hipMemset(mutations_idx, 0, exp_m->nb_indivs_ * sizeof(int))); checkCuda(hipMalloc((void**) &dna_mutator_list, exp_m->nb_indivs_ * sizeof(GPUDnaMutator))); current_size_tab_mutation = exp_m->nb_indivs_ * 100; checkCuda(hipMalloc(&tab_mutation, current_size_tab_mutation * sizeof(TypeMutation))); checkCuda(hipMalloc((void**) &rna_idx, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda(hipMemset(rna_idx, 0, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda(hipMalloc((void**) &rna_offset, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(hipMemset(rna_offset, 0, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(hipMalloc((void**) &protein_idx, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda( hipMemset(protein_idx, 0, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda(hipMalloc((void**) &protein_offset, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(hipMemset(protein_offset, 0, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(hipMalloc((void**) &next_generation_reproducer, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(hipMalloc((void**) &nb_promoters, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(hipMemset(nb_promoters, 0, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(hipMalloc((void**) &nb_proteins, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(hipMemset(nb_proteins, 0, (exp_m->nb_indivs_ + 1) * sizeof(int))); host_phenotype = (double**) malloc(exp_m->nb_indivs_ * sizeof(double*)); checkCuda( hipMalloc((void***) &phenotype, exp_m->nb_indivs_ * sizeof(double*))); host_phenotype_activ = (double**) malloc(exp_m->nb_indivs_ * sizeof(double*)); checkCuda(hipMalloc((void***) &phenotype_activ, exp_m->nb_indivs_ * sizeof(double*))); host_phenotype_inhib = (double**) malloc(exp_m->nb_indivs_ * sizeof(double*)); checkCuda(hipMalloc((void***) &phenotype_inhib, exp_m->nb_indivs_ * sizeof(double*))); for (int indiv_id = 0; indiv_id < exp_m->nb_indivs_; indiv_id++) { checkCuda( hipMalloc((void**) &host_phenotype[indiv_id], 300 * sizeof(double))); checkCuda(hipMemset(host_phenotype[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(hipMalloc((void**) &host_phenotype_activ[indiv_id], 300 * sizeof(double))); checkCuda( hipMemset(host_phenotype_activ[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(hipMalloc((void**) &host_phenotype_inhib[indiv_id], 300 * sizeof(double))); checkCuda( hipMemset(host_phenotype_inhib[indiv_id], 0.0, 300 * sizeof(double))); } current_size_rna_list = exp_m->nb_indivs_ * 10000; checkCuda(hipMalloc(&rna, current_size_rna_list * sizeof(pRNA))); current_size_protein_list = exp_m->nb_indivs_ * 1000; checkCuda(hipMalloc(&protein, current_size_protein_list * sizeof(pProtein))); checkCuda( hipMemcpy(phenotype, host_phenotype, exp_m->nb_indivs_ * sizeof(double*), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(phenotype_activ, host_phenotype_activ, exp_m->nb_indivs_ * sizeof(double*), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(phenotype_inhib, host_phenotype_inhib, exp_m->nb_indivs_ * sizeof(double*), hipMemcpyHostToDevice)); checkCuda(hipMalloc((void**) &target, 300 * sizeof(double))); double target_host[300]; for (int i = 0; i < 300; i++) { target_host[i] = exp_m->target[i]; } checkCuda(hipMemcpy(target, target_host, 300 * sizeof(double), hipMemcpyHostToDevice)); checkCuda(hipMalloc((void**) &metaerror, exp_m->nb_indivs_ * sizeof(double))); checkCuda(hipMalloc((void**) &fitness, exp_m->nb_indivs_ * sizeof(double))); //printf("GPU Counter %d\n",exp_m->rng_->counters().size()); checkCuda(hipMalloc((void**) 
&gpu_counters, exp_m->rng_->counters().size() * sizeof(unsigned long long))); checkCuda(hipMemcpy(gpu_counters, exp_m->rng_->counters().data(), exp_m->rng_->counters().size() * sizeof(unsigned long long), hipMemcpyHostToDevice)); } __global__ // Copy first indiv's dna into all the other indivs' dna void clone_init_indiv(size_t* dna_size, char* dna) { int dna_chunk_idx = blockIdx.x; int indiv_id = blockIdx.y; if(indiv_id == 0) return; // don't copy indiv 0 onto itself int pos = (dna_chunk_idx*128)+threadIdx.x; if (pos < dna_size[0]) { dna[indiv_id*dna_size[0] + pos] = dna[pos]; } } __global__ void search_start_stop_RNA(size_t* dna_size, char* dna, size_t* dna_offset, int* nb_promoters, int8_t* dna_term, int nb_indivs, int global_dna_size, unsigned long long* nb_mut_bp) { int dna_pos_block = blockIdx.x; int indiv_id = blockIdx.y; int dna_pos = (dna_pos_block*128)+threadIdx.x; __shared__ int nb_prom_block; if (threadIdx.x == 0) { nb_prom_block = 0; } __syncthreads(); if (dna_pos < dna_size[indiv_id] && dna_size[indiv_id] >= PROM_SIZE) { dna_term[dna_offset[indiv_id]+dna_pos] = 22; //atomicAdd(nb_mut_bp,1); int prom_dist[22]; int term_dist[4]; for (int motif_id = 0; motif_id < 26; motif_id++) { if (motif_id < 22) { prom_dist[motif_id] = PROM_SEQ[motif_id] == dna[dna_pos + motif_id >= dna_size[indiv_id] ? dna_offset[indiv_id]+ dna_pos + motif_id - dna_size[indiv_id] : dna_offset[indiv_id]+ dna_pos + motif_id] ? 0 : 1; } else if (motif_id >= 22) { int t_motif_id = motif_id - 22; term_dist[t_motif_id] = dna[dna_pos + t_motif_id >= dna_size[indiv_id] ? dna_offset[indiv_id]+dna_pos + t_motif_id - dna_size[indiv_id] : dna_offset[indiv_id]+ dna_pos + t_motif_id] != dna[dna_pos - t_motif_id + 10 >= dna_size[indiv_id] ? dna_offset[indiv_id]+ dna_pos - t_motif_id + 10 - dna_size[indiv_id] : dna_offset[indiv_id]+ dna_pos - t_motif_id + 10] ? 1 : 0; } } int8_t dist_prom = prom_dist[0] + prom_dist[1] + prom_dist[2] + prom_dist[3] + prom_dist[4] + prom_dist[5] + prom_dist[6] + prom_dist[7] + prom_dist[8] + prom_dist[9] + prom_dist[10] + prom_dist[11] + prom_dist[12] + prom_dist[13] + prom_dist[14] + prom_dist[15] + prom_dist[16] + prom_dist[17] + prom_dist[18] + prom_dist[19] + prom_dist[20] + prom_dist[21]; dna_term[dna_offset[indiv_id]+dna_pos] = dist_prom; if (dist_prom <= 4) { int rna_idx = atomicAdd(&nb_prom_block, 1); } int dist_term = term_dist[0] + term_dist[1] + term_dist[2] + term_dist[3]; dna_term[dna_offset[indiv_id]+dna_pos] |= dist_term == 4 ? 
1<<7 : 0;
  }

  __syncthreads();

  if (threadIdx.x == 0) {
    atomicAdd(nb_promoters+indiv_id,nb_prom_block);
    atomicAdd(nb_promoters+nb_indivs,nb_prom_block);
  }
}

__global__
void compute_RNA_offset(int* nb_promoters, int* rna_offset) {
  const int indiv_id = blockIdx.x;

  __shared__ int grid_rna_offset;

  if (threadIdx.x == 0) {
    grid_rna_offset = 0;
  }
  __syncthreads();

  {
    int local_rna_offset = 0;
    for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) {
      local_rna_offset += nb_promoters[cpt];
    }
    if (local_rna_offset > 0)
      atomicAdd(&grid_rna_offset, local_rna_offset);
  }
  __syncthreads();

  if (threadIdx.x == 0) {
    rna_offset[indiv_id] = grid_rna_offset;
  }
}

__global__
void fill_RNA( int8_t* dna_term, size_t* dna_size, size_t* dna_offset, int* nb_promoters,
               int* rna_offset, pRNA* rnas, int32_t* rna_idx, int nb_indiv) {
  int dna_pos_block = blockIdx.x;
  int indiv_id = blockIdx.y;

  int dna_pos = (dna_pos_block * 128) + threadIdx.x;

  if (dna_pos < dna_size[indiv_id] && dna_size[indiv_id] >= PROM_SIZE) {
    // Mask off the most significant bit (the terminator flag) to keep only the promoter distance
    int8_t dist = dna_term[dna_offset[indiv_id]+dna_pos] & (0x7F);
    if (dist <= 4) {
      int local_rna_idx = atomicAdd(rna_idx + indiv_id, 1);
      atomicAdd(rna_idx + nb_indiv, 1);

      rnas[rna_offset[indiv_id] + local_rna_idx].begin = dna_pos;
      rnas[rna_offset[indiv_id] + local_rna_idx].dist = dist;
      rnas[rna_offset[indiv_id] + local_rna_idx].transcribed = false;
      rnas[rna_offset[indiv_id] + local_rna_idx].indiv_id = indiv_id;
    }
  }
}

__global__
void compute_RNA( int8_t* dna_term, size_t* dna_size, size_t* dna_offset, pRNA* rnas, int global_nb_rna) {
  const int globalIdx = blockIdx.x*blockDim.x+threadIdx.x;
  if (globalIdx < global_nb_rna ) {
    int indiv_id = rnas[globalIdx].indiv_id;
    if (dna_size[indiv_id] >= PROM_SIZE) {
      int k = rnas[globalIdx].begin + 22;
      k = k >= dna_size[indiv_id] ? k - dna_size[indiv_id] : k;
      int k_end = k;
      bool found=false;

      do {
        //printf("%d -- %d %ld\n",indiv_id,k,dna_size[indiv_id]);
        if (dna_term[dna_offset[indiv_id]+k] & (1<<7)) {
          int32_t rna_end = k + 10 >= dna_size[indiv_id] ? k + 10 - dna_size[indiv_id] : k + 10;

          int32_t rna_length = 0;

          if (rnas[globalIdx].begin > rna_end)
            rna_length = dna_size[indiv_id] - rnas[globalIdx].begin + rna_end;
          else
            rna_length = rna_end - rnas[globalIdx].begin;

          if (rna_length < 19) {
            rnas[globalIdx].begin = 0;
            rnas[globalIdx].end = 0;
            rnas[globalIdx].length = 0;
            rnas[globalIdx].transcribed = false;
            break;
          }

          rnas[globalIdx].end = rna_end;
          rnas[globalIdx].transcribed = true;
          rnas[globalIdx].length = rna_length;

          if (rnas[globalIdx].end>=dna_size[indiv_id]) {
            printf("Termin %d %d S %d %ld\n", rnas[globalIdx].begin, rnas[globalIdx].end,indiv_id,dna_size[indiv_id]);
            //assert(rnas[globalIdx].end<dna_size[indiv_id]);
          }
          found=true;
          break;
        }
        k++;
        k = k >= dna_size[indiv_id] ?
k - dna_size[indiv_id] : k; } while (k != k_end); } } else { rnas[globalIdx].begin = 0; rnas[globalIdx].end = 0; rnas[globalIdx].length = 0; rnas[globalIdx].transcribed = false; } } __global__ void display_RNA( pRNA* rna, size_t* dna_size, int32_t global_nb_rna) { for(int i = 0; i < global_nb_rna; i++) { if (rna[i].transcribed) if (rna[i].end>=dna_size[rna[i].indiv_id]) { printf("UIIH %d %d S %d -- %ld\n",rna[i].begin,rna[i].end,rna[i].indiv_id,dna_size[rna[i].indiv_id]); } } } __global__ void compute_start_protein(int8_t* start_protein, size_t* dna_size, size_t* dna_offset, pRNA* rna, char* dna, int32_t* nb_proteins, int32_t global_nb_rna, int nb_indiv) { const int globalIdx = blockIdx.x*blockDim.x+threadIdx.x; int nb_prot = 0; if (globalIdx < global_nb_rna) { if (rna[globalIdx].transcribed) { const int indiv_id = rna[globalIdx].indiv_id; int c_pos = rna[globalIdx].begin; if (rna[globalIdx].length > 22) { c_pos += 22; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; int count_loop=0; if (rna[globalIdx].end>=dna_size[indiv_id]) { printf("ator %d S %d\n",rna[globalIdx].end,indiv_id); assert(rna[globalIdx].end<dna_size[indiv_id]); } while (c_pos != rna[globalIdx].end) { //if (indiv_id==606) printf("%d -- %d %d\n",indiv_id,c_pos,rna[globalIdx].end); bool start = false; int t_pos, k_t; for (int k = 0; k < 9; k++) { k_t = k >= 6 ? k + 4 : k; t_pos = c_pos + k_t >= dna_size[indiv_id] ? c_pos + k_t - dna_size[indiv_id] : c_pos + k_t; count_loop++; if (count_loop>10000) {printf("%d %d %d %d %d %d %d %ld\n",indiv_id,globalIdx,k, c_pos,t_pos, rna[globalIdx].begin,rna[globalIdx].end, dna_size[indiv_id]);assert(0);} if (dna[dna_offset[indiv_id]+t_pos] == SHINE_DAL_SEQ[k]) { start = true; } else { start = false; break; } } start_protein[dna_offset[indiv_id]+c_pos] = start; if (start) { nb_prot++; } c_pos++; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; } } atomicAdd(nb_proteins+indiv_id,nb_prot); atomicAdd(nb_proteins+nb_indiv,nb_prot); } } } __global__ void compute_protein_offset(int32_t* nb_proteins, int* protein_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_protein_offset; if (threadIdx.x == 0) { grid_protein_offset = 0; } __syncthreads(); { int local_protein_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_protein_offset += nb_proteins[cpt]; } if (local_protein_offset > 0) atomicAdd(&grid_protein_offset, local_protein_offset); } __syncthreads(); if (threadIdx.x == 0) { protein_offset[indiv_id] = grid_protein_offset; } } __global__ void fill_protein(int8_t* start_protein, size_t* dna_offset, int* protein_idx, int* protein_offset, pRNA* rna, pProtein* protein, size_t* dna_size, int32_t global_nb_rna, int nb_indiv) { const int globalIdx = blockIdx.x*blockDim.x+threadIdx.x; if (globalIdx < global_nb_rna) { if (rna[globalIdx].transcribed) { int indiv_id = rna[globalIdx].indiv_id; int c_pos = rna[globalIdx].begin; if (rna[globalIdx].length > 22) { c_pos += 22; c_pos = c_pos >= dna_size[indiv_id] ? 
c_pos - dna_size[indiv_id] : c_pos; while (c_pos != rna[globalIdx].end) { if (start_protein[dna_offset[indiv_id]+c_pos] == 1) { int local_protein_idx = atomicAdd(protein_idx + indiv_id, 1); atomicAdd(protein_idx + nb_indiv, 1); protein[protein_offset[indiv_id] + local_protein_idx].protein_start = c_pos; protein[protein_offset[indiv_id] + local_protein_idx].indiv_id = rna[globalIdx].indiv_id; protein[protein_offset[indiv_id] + local_protein_idx].stop_RNA = rna[globalIdx].end; protein[protein_offset[indiv_id] + local_protein_idx].translated = false; protein[protein_offset[indiv_id] + local_protein_idx].e = 1.0 - fabs(((double) rna[globalIdx].dist)) / 5.0; } c_pos++; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; } } } } } __global__ void compute_proteins( int8_t* start_protein, size_t* dna_size, size_t* dna_offset, pProtein* protein, char* dna, int32_t global_nb_protein) { __shared__ int next_protein_idx; if (threadIdx.x == 0) { next_protein_idx = 0; } __syncthreads(); int local_protein_idx = atomicAdd(&next_protein_idx,1); while (local_protein_idx < global_nb_protein) { int indiv_id = protein[local_protein_idx].indiv_id; int start_protein_pos = protein[local_protein_idx].protein_start + 13; int length = -1; start_protein_pos = start_protein_pos >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] : start_protein_pos; if (protein[local_protein_idx].protein_start < protein[local_protein_idx].stop_RNA) { length = protein[local_protein_idx].stop_RNA - protein[local_protein_idx].protein_start; } else { length = dna_size[indiv_id] - protein[local_protein_idx].protein_start + protein[local_protein_idx].stop_RNA + 1; } length -= 13; bool is_protein = false; length+=1; length = length - (length%3); for (int loop_i = 0; length - loop_i >= 2; loop_i+=3) { int t_k; start_protein_pos = start_protein_pos >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] : start_protein_pos; is_protein = false; for (int k = 0; k < 3; k++) { t_k = start_protein_pos + k >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] + k : start_protein_pos + k; if (dna[dna_offset[indiv_id]+t_k] == PROTEIN_END[k]) { is_protein = true; } else { is_protein = false; break; } } if (is_protein) { int prot_length = -1; if (protein[local_protein_idx].protein_start + 13 < t_k) { prot_length = t_k - (protein[local_protein_idx].protein_start + 13); } else { prot_length = dna_size[indiv_id] - (protein[local_protein_idx].protein_start + 13) + t_k; } if (prot_length >= 3) { protein[local_protein_idx].protein_end = t_k; protein[local_protein_idx].protein_length = prot_length; protein[local_protein_idx].translated = true; } break; } start_protein_pos += 3; start_protein_pos = start_protein_pos >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] : start_protein_pos; } local_protein_idx = atomicAdd(&next_protein_idx,1); } } __global__ void translate_proteins( pProtein* protein, size_t* dna_size, char* dna, size_t* dna_offset, int32_t global_nb_protein, double w_max) { __shared__ int next_protein_idx; if (threadIdx.x == 0) { next_protein_idx = 0; } __syncthreads(); int local_protein_idx = atomicAdd(&next_protein_idx,1); while (local_protein_idx < global_nb_protein) { int indiv_id = protein[local_protein_idx].indiv_id; if (protein[local_protein_idx].translated) { int c_pos = protein[local_protein_idx].protein_start, t_pos; int end_pos = protein[local_protein_idx].protein_end; c_pos += 13; end_pos -= 3; c_pos = c_pos >= dna_size[indiv_id] ? 
c_pos - dna_size[indiv_id] : c_pos; end_pos = end_pos < 0 ? dna_size[indiv_id] + end_pos : end_pos; int8_t value = 0; int8_t codon_list[64] = {}; int8_t codon_idx = 0; int32_t count_loop = 0; bool contin = true; while (count_loop < protein[local_protein_idx].protein_length / 3 && codon_idx < 64) { value = 0; for (int8_t i = 0; i < 3; i++) { t_pos = c_pos + i >= dna_size[indiv_id] ? c_pos + i - dna_size[indiv_id] : c_pos + i; if (dna[dna_offset[indiv_id]+t_pos] == '1') value += 1 << (CODON_SIZE - i - 1); } codon_list[codon_idx] = value; codon_idx++; count_loop++; c_pos += 3; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; } double M = 0.0; double W = 0.0; double H = 0.0; int32_t nb_m = 0; int32_t nb_w = 0; int32_t nb_h = 0; bool bin_m = false; // Initializing to false will yield a conservation of the high weight bit bool bin_w = false; // when applying the XOR operator for the Gray to standard conversion bool bin_h = false; for (int i = 0; i < codon_idx; i++) { switch (codon_list[i]) { case CODON_M0 : { // M codon found nb_m++; // Convert Gray code to "standard" binary code bin_m ^= false; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ M <<= 1; M *= 2; // Add this nucleotide's contribution to M if (bin_m) M += 1; break; } case CODON_M1 : { // M codon found nb_m++; // Convert Gray code to "standard" binary code bin_m ^= true; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest bit was found, make a left bitwise shift //~ M <<= 1; M *= 2; // Add this nucleotide's contribution to M if (bin_m) M += 1; break; } case CODON_W0 : { // W codon found nb_w++; // Convert Gray code to "standard" binary code bin_w ^= false; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ W <<= 1; W *= 2; // Add this nucleotide's contribution to W if (bin_w) W += 1; break; } case CODON_W1 : { // W codon found nb_w++; // Convert Gray code to "standard" binary code bin_w ^= true; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ W <<= 1; W *= 2; // Add this nucleotide's contribution to W if (bin_w) W += 1; break; } case CODON_H0 : case CODON_START : // Start codon codes for the same amino-acid as H0 codon { // H codon found nb_h++; // Convert Gray code to "standard" binary code bin_h ^= false; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ H <<= 1; H *= 2; // Add this nucleotide's contribution to H if (bin_h) H += 1; break; } case CODON_H1 : { // H codon found nb_h++; // Convert Gray code to "standard" binary code bin_h ^= true; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ H <<= 1; H *= 2; // Add this nucleotide's contribution to H if (bin_h) H += 1; break; } } } // ---------------------------------------------------------------------------------- // 2) Normalize M, W and H values in [0;1] according to number of codons of each kind // 
---------------------------------------------------------------------------------- protein[local_protein_idx].m = nb_m != 0 ? M / (pow(2, nb_m) - 1) : 0.5; protein[local_protein_idx].w = nb_w != 0 ? W / (pow(2, nb_w) - 1) : 0.0; protein[local_protein_idx].h = nb_h != 0 ? H / (pow(2, nb_h) - 1) : 0.5; // ------------------------------------------------------------------------------------ // 3) Normalize M, W and H values according to the allowed ranges (defined in macros.h) // ------------------------------------------------------------------------------------ // x_min <= M <= x_max // w_min <= W <= w_max // h_min <= H <= h_max protein[local_protein_idx].m = (X_MAX - X_MIN) * protein[local_protein_idx].m + X_MIN; protein[local_protein_idx].w = (w_max - W_MIN) * protein[local_protein_idx].w + W_MIN; protein[local_protein_idx].h = (H_MAX - H_MIN) * protein[local_protein_idx].h + H_MIN; if (nb_m == 0 || nb_w == 0 || nb_h == 0 || protein[local_protein_idx].w == 0.0 || protein[local_protein_idx].h == 0.0) { protein[local_protein_idx].is_functional = false; } else { protein[local_protein_idx].is_functional = true; } } local_protein_idx = atomicAdd(&next_protein_idx,1); } } __global__ void compute_phenotype( pProtein* protein, int32_t global_nb_protein, double** phenotype, double** phenotype_activ, double** phenotype_inhib, int nb_indiv) { __shared__ int next_protein_idx; if (threadIdx.x == 0) { next_protein_idx = 0; } __syncthreads(); int local_protein_idx = atomicAdd(&next_protein_idx,1); while (local_protein_idx < global_nb_protein) { int indiv_id = protein[local_protein_idx].indiv_id; if (protein[local_protein_idx].translated) { if (fabs(protein[local_protein_idx].w) < 1e-15 || fabs(protein[local_protein_idx].h) < 1e-15) { } else { if (protein[local_protein_idx].is_functional) { // Compute triangle points' coordinates double x0 = protein[local_protein_idx].m - protein[local_protein_idx].w; double x1 = protein[local_protein_idx].m; double x2 = protein[local_protein_idx].m + protein[local_protein_idx].w; int ix0 = (int) (x0 * 300); int ix1 = (int) (x1 * 300); int ix2 = (int) (x2 * 300); if (ix0 < 0) ix0 = 0; else if (ix0 > (299)) ix0 = 299; if (ix1 < 0) ix1 = 0; else if (ix1 > (299)) ix1 = 299; if (ix2 < 0) ix2 = 0; else if (ix2 > (299)) ix2 = 299; // Compute the first equation of the triangle double incY = (protein[local_protein_idx].h * protein[local_protein_idx].e) / (ix1 - ix0); int count = 1; // Updating value between x0 and x1 for (int i = ix0 + 1; i < ix1; i++) { if (protein[local_protein_idx].h > 0) atomicAdd(&phenotype_activ[indiv_id][i], (incY * (count++))); else atomicAdd(&phenotype_inhib[indiv_id][i], (incY * (count++))); } if (protein[local_protein_idx].h > 0) { atomicAdd(&phenotype_activ[indiv_id][ix1], (protein[local_protein_idx].h * protein[local_protein_idx].e)); } else atomicAdd(&phenotype_inhib[indiv_id][ix1], (protein[local_protein_idx].h * protein[local_protein_idx].e)); // Compute the second equation of the triangle incY = (protein[local_protein_idx].h * protein[local_protein_idx].e) / (ix2 - ix1); count = 1; // Updating value between x1 and x2 for (int i = ix1 + 1; i < ix2; i++) { if (protein[local_protein_idx].h > 0) atomicAdd(&phenotype_activ[indiv_id][i], ((protein[local_protein_idx].h * protein[local_protein_idx].e) - (incY * (count++)))); else atomicAdd(&phenotype_inhib[indiv_id][i], ((protein[local_protein_idx].h * protein[local_protein_idx].e) - (incY * (count++)))); } } } } local_protein_idx = atomicAdd(&next_protein_idx,1); } __syncthreads(); } __global__ void 
compute_metaerror_fitness(double selection_pressure,double** phenotype, double** phenotype_activ,double** phenotype_inhib, double* target, double* metaerror, double* fitness) { int indiv_id = blockIdx.x; int fuzzy_idx = threadIdx.x; if (phenotype_activ[indiv_id][fuzzy_idx] > 1.0) phenotype_activ[indiv_id][fuzzy_idx] = 1.0; if (phenotype_inhib[indiv_id][fuzzy_idx] < -1.0) phenotype_inhib[indiv_id][fuzzy_idx] = -1.0; phenotype[indiv_id][fuzzy_idx] = phenotype_activ[indiv_id][fuzzy_idx] + phenotype_inhib[indiv_id][fuzzy_idx]; __shared__ double delta[300]; if (phenotype[indiv_id][fuzzy_idx] > 1) phenotype[indiv_id][fuzzy_idx] = 1; if (phenotype[indiv_id][fuzzy_idx] < 0) phenotype[indiv_id][fuzzy_idx] = 0; delta[fuzzy_idx] = phenotype[indiv_id][fuzzy_idx] - target[fuzzy_idx]; __syncthreads(); if (threadIdx.x == 0) { metaerror[indiv_id] = 0; for (int i = 0; i < 299; i++) { metaerror[indiv_id] += ((fabs(delta[i]) + fabs(delta[i + 1])) / (600.0)); } fitness[indiv_id] = exp( -selection_pressure * ((double)metaerror[indiv_id])); } } __device__ int32_t Threefry::Device::roulette_random(double* probs, int32_t nb_elts) { double pick_one = 0.0; while (pick_one == 0.0) { pick_one = randomDouble(); } int32_t found_org = 0; pick_one -= probs[0]; while (pick_one > 0) { assert(found_org<nb_elts-1); pick_one -= probs[++found_org]; } return found_org; } __global__ void selection(double* fitness, int* next_generation_reproducer, unsigned long long* gpu_counters, int grid_width, int grid_height, int nb_indiv) { int indiv_id = blockIdx.x; int neightbor = threadIdx.x; __shared__ double local_fit_array[NEIGHBORHOOD_SIZE]; __shared__ double probs[NEIGHBORHOOD_SIZE]; __shared__ int count; __shared__ double sum_local_fit; int32_t x = indiv_id / grid_height; int32_t y = indiv_id % grid_height; int cur_x,cur_y; if (threadIdx.x == 0) { count = 0; sum_local_fit = 0.0; } __syncthreads(); if (threadIdx.x == 0) { for (int8_t i = -1; i < SELECTION_SCOPE_X - 1; i++) { for (int8_t j = -1; j < SELECTION_SCOPE_Y - 1; j++) { cur_x = (x + i + grid_width) % grid_width; cur_y = (y + j + grid_height) % grid_height; local_fit_array[count] = fitness[cur_x * grid_height + cur_y]; atomicAdd(&sum_local_fit, local_fit_array[count]); count++; } } } __syncthreads(); //for(int16_t i = 0 ; i < NEIGHBORHOOD_SIZE ; i++) { probs[neightbor] = local_fit_array[neightbor]/sum_local_fit; __syncthreads(); if (threadIdx.x == 0) { Threefry::Device rng(gpu_counters,indiv_id,Threefry::Phase::REPROD,nb_indiv); int found_org = rng.roulette_random(probs, NEIGHBORHOOD_SIZE); int x_offset = (found_org / SELECTION_SCOPE_X) - 1; int y_offset = (found_org % SELECTION_SCOPE_Y) - 1; next_generation_reproducer[indiv_id] = ((x + x_offset + grid_width) % grid_width) * grid_height + ((y + y_offset + grid_height) % grid_height); } } __constant__ double cof[6] = { 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 }; // Returns the value ln[gamma(X)] for X. // The gamma function is defined by the integral gamma(z) = int(0, +inf, t^(z-1).e^(-t)dt). // When the argument z is an integer, the gamma function is just the familiar factorial // function, but offset by one, n! = gamma(n + 1). 
__device__ static double gammln(double X) { double x, y, tmp, ser; y = x = X; tmp = x + 5.5; tmp -= (x+0.5) * log(tmp); ser = 1.000000000190015; for (int8_t j = 0 ; j <= 5 ; j++) { ser += cof[j] / ++y; } return -tmp + log(2.5066282746310005 * ser / x); } __device__ int32_t Threefry::Device::binomial_random(int32_t nb_drawings, double prob) { int32_t nb_success; // The binomial distribution is invariant under changing // ProbSuccess to 1-ProbSuccess, if we also change the answer to // NbTrials minus itself; we ll remember to do this below. double p; if (prob <= 0.5) p = prob; else p = 1.0 - prob; // mean of the deviate to be produced double mean = nb_drawings * p; if (nb_drawings < 25) // Use the direct method while NbTrials is not too large. // This can require up to 25 calls to the uniform random. { nb_success = 0; for (int32_t j = 1 ; j <= nb_drawings ; j++) { if (randomDouble() < p) nb_success++; } } else if (mean < 1.0) // If fewer than one event is expected out of 25 or more trials, // then the distribution is quite accurately Poisson. Use direct Poisson method. { double g = exp(-mean); double t = 1.0; int32_t j; for (j = 0; j <= nb_drawings ; j++) { t = t * randomDouble(); if (t < g) break; } if (j <= nb_drawings) nb_success = j; else nb_success = nb_drawings; } else // Use the rejection method. { double en = nb_drawings; double oldg = gammln(en + 1.0); double pc = 1.0 - p; double plog = log(p); double pclog = log(pc); // rejection method with a Lorentzian comparison function. double sq = sqrt(2.0 * mean * pc); double angle, y, em, t; do { do { angle = M_PI * randomDouble(); y = tan(angle); em = sq*y + mean; } while (em < 0.0 || em >= (en + 1.0)); // Reject. em = floor(em); // Trick for integer-valued distribution. t = 1.2 * sq * (1.0 + y*y) * exp(oldg - gammln(em + 1.0) - gammln(en - em + 1.0) + em * plog + (en - em) * pclog); } while (randomDouble() > t); // Reject. This happens about 1.5 times per deviate, on average. nb_success = (int32_t) rint(em); } // Undo the symmetry transformation. if (p != prob) nb_success = nb_drawings - nb_success; return nb_success; } __global__ void generate_mutations(unsigned long long* gpu_counters, size_t* dna_size, int* nb_mutations, GPUDnaMutator* dna_mutator_list,int* next_generation_reproducer, int nb_indivs, double mutation_rate) { int indiv_id = blockIdx.x; Threefry::Device rng(gpu_counters,indiv_id,Threefry::Phase::MUTATION,nb_indivs); double mutation_r = mutation_rate; int prev_gen_id = next_generation_reproducer[indiv_id]; size_t prev_gen_size = dna_size[prev_gen_id]; // Small mutations dna_mutator_list[indiv_id].nb_swi_ = rng. 
binomial_random(prev_gen_size, mutation_r); dna_mutator_list[indiv_id].nb_mut_ = dna_mutator_list[indiv_id].nb_swi_; dna_mutator_list[indiv_id].cpt_mut_ = dna_mutator_list[indiv_id].nb_mut_; nb_mutations[indiv_id] = dna_mutator_list[indiv_id].nb_mut_; atomicAdd(nb_mutations+nb_indivs,nb_mutations[indiv_id]); } __global__ void compute_tab_mutations_offset(int* nb_mutations, int* mutations_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_mutation_offset; if (threadIdx.x == 0) { grid_mutation_offset = 0; } __syncthreads(); { int local_mutation_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_mutation_offset += nb_mutations[cpt]; } if (local_mutation_offset > 0) atomicAdd(&grid_mutation_offset, local_mutation_offset); } __syncthreads(); if (threadIdx.x == 0) { mutations_offset[indiv_id] = grid_mutation_offset; } } __device__ static int mod(int a, int b) { assert(b > 0); while (a < 0) a += b; while (a >= b) a -= b; return a; } __global__ void predict_size_v2(size_t* dna_size, size_t* next_gen_dna_size, GPUDnaMutator* dna_mutator_list, TypeMutation* tab_mut, int* nb_mutations, int* mutations_offset, unsigned long long* gpu_counters,int* next_generation_reproducer, int max_genome_length, int min_genome_length, int nb_indiv) { const int indiv_id = blockIdx.x; int random_value; int transient_size = dna_size[next_generation_reproducer[indiv_id]]; Threefry::Device rng(gpu_counters,indiv_id,Threefry::Phase::MUTATION,nb_indiv); for (int mut_idx = 0; mut_idx < dna_mutator_list[indiv_id].nb_mut_; mut_idx++) { dna_mutator_list[indiv_id].cpt_mut_--; dna_mutator_list[indiv_id].nb_swi_--; int pos = rng.random(transient_size); tab_mut[mutations_offset[indiv_id]+mut_idx].type_ = MutationEventType::DO_SWITCH; tab_mut[mutations_offset[indiv_id]+mut_idx].pos_1_ = pos; } next_gen_dna_size[indiv_id] = transient_size; } __global__ void display_mut(TypeMutation* tab_mut, int* nb_mutations, int* mutations_offset) { for (int indiv_id = 0; indiv_id < 25; indiv_id++) { printf("nb mut %d : %d %d\n",indiv_id,nb_mutations[indiv_id],mutations_offset[indiv_id]); for (int i = 0; i < nb_mutations[indiv_id]; i++) { printf("%d -- %d %d %d %d %d %c%c%c%c%c%c %d\n", i, tab_mut[mutations_offset[indiv_id] + i].type_, tab_mut[mutations_offset[indiv_id] + i].pos_1_, tab_mut[mutations_offset[indiv_id] + i].pos_2_, tab_mut[mutations_offset[indiv_id] + i].pos_3_, tab_mut[mutations_offset[indiv_id] + i].number_, tab_mut[mutations_offset[indiv_id] + i].seq[0], tab_mut[mutations_offset[indiv_id] + i].seq[1], tab_mut[mutations_offset[indiv_id] + i].seq[2], tab_mut[mutations_offset[indiv_id] + i].seq[3], tab_mut[mutations_offset[indiv_id] + i].seq[4], tab_mut[mutations_offset[indiv_id] + i].seq[5], tab_mut[mutations_offset[indiv_id] + i].transient_size); } } } __global__ void compute_next_gen_dna_offset(size_t* next_gen_dna_size, size_t* next_gen_dna_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_dna_offset; if (threadIdx.x == 0) { grid_dna_offset = 0; } __syncthreads(); { int local_dna_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_dna_offset += next_gen_dna_size[cpt]; } if (local_dna_offset > 0) atomicAdd(&grid_dna_offset, local_dna_offset); } __syncthreads(); if (threadIdx.x == 0) { next_gen_dna_offset[indiv_id] = grid_dna_offset; } } __global__ void do_mutation_v2(TypeMutation* tab_mut, int* nb_mutations, size_t* dna_size, size_t* dna_offset, char* dna, char* next_gen_dna, size_t* next_gen_dna_size, size_t* next_gen_dna_offset, int* 
next_generation_reproducer, int* mutations_offset, unsigned long long int* nb_mut_bp) { int dna_pos_block = blockIdx.x; int indiv_id = blockIdx.y; int32_t locus = (dna_pos_block*128)+threadIdx.x; int32_t next_locus = locus; if (locus < next_gen_dna_size[indiv_id]) { int8_t mutate = 0; int nb_events = nb_mutations[indiv_id]; for (; nb_events > 0; nb_events--) { auto &mut = tab_mut[mutations_offset[indiv_id]+nb_events - 1]; switch (mut.type_) { case DO_SWITCH: if (locus == mut.pos_1_) mutate = not mutate; break; } } assert(locus >= 0); assert(locus < dna_size[next_generation_reproducer[indiv_id]]); auto base = dna[dna_offset[next_generation_reproducer[indiv_id]]+locus]; if (mutate) base = (base == '0') ? '1' : '0'; next_gen_dna[next_gen_dna_offset[indiv_id]+next_locus] = base; } } void run_a_step_on_GPU(int nb_indiv, double w_max, double selection_pressure, int grid_width, int grid_height, double mutation_rate) { int x_dim_size = (host_max_dna_size / 128)+1; int y_dim_size = nb_indiv; dim3 dimGrid(x_dim_size,y_dim_size); hipLaunchKernelGGL(( search_start_stop_RNA), dim3(dimGrid),dim3(128), 0, 0, dna_size,dna,dna_offset, nb_promoters,dna_term,nb_indiv,global_dna_size,nb_mut_bp); int total_nb_promoters_host; checkCuda(hipMemcpy(&total_nb_promoters_host, nb_promoters+nb_indiv, sizeof(int), hipMemcpyDeviceToHost)); if (total_nb_promoters_host > current_size_rna_list) { checkCuda(hipFree(rna)); current_size_rna_list = total_nb_promoters_host * 1.1; checkCuda(hipMalloc(&rna,current_size_rna_list* sizeof(pRNA))); } hipLaunchKernelGGL(( compute_RNA_offset), dim3(nb_indiv),dim3(128), 0, 0, nb_promoters,rna_offset); hipLaunchKernelGGL(( fill_RNA), dim3(dimGrid),dim3(128), 0, 0, dna_term, dna_size,dna_offset, nb_promoters, rna_offset, rna, rna_idx,nb_indiv); int global_nb_rna; checkCuda(hipMemcpy(&global_nb_rna, rna_idx+nb_indiv, sizeof(int), hipMemcpyDeviceToHost)); hipLaunchKernelGGL(( compute_RNA), dim3(global_nb_rna/128+1),dim3(128), 0, 0, dna_term,dna_size, dna_offset, rna, global_nb_rna); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_start_protein), dim3(global_nb_rna),dim3(1), 0, 0, start_protein, dna_size, dna_offset, rna, dna, nb_proteins, global_nb_rna, nb_indiv); hipDeviceSynchronize(); int total_nb_protein_host; checkCuda(hipMemcpy(&total_nb_protein_host, nb_proteins+nb_indiv, sizeof(int), hipMemcpyDeviceToHost)); if (total_nb_protein_host > current_size_protein_list) { checkCuda(hipFree(protein)); current_size_protein_list = total_nb_protein_host * 1.1; checkCuda(hipMalloc(&protein,current_size_protein_list* sizeof(pProtein))); } hipLaunchKernelGGL(( compute_protein_offset), dim3(nb_indiv),dim3(128), 0, 0, nb_proteins, protein_offset); hipLaunchKernelGGL(( fill_protein), dim3(global_nb_rna/128+1),dim3(128), 0, 0, start_protein,dna_offset, protein_idx, protein_offset, rna, protein, dna_size, global_nb_rna, nb_indiv); int global_nb_protein; checkCuda(hipMemcpy(&global_nb_protein, protein_idx+nb_indiv, sizeof(int), hipMemcpyDeviceToHost)); hipLaunchKernelGGL(( compute_proteins), dim3(1),dim3(128), 0, 0, start_protein, dna_size, dna_offset,protein, dna, global_nb_protein); hipLaunchKernelGGL(( translate_proteins), dim3(1),dim3(128), 0, 0, protein, dna_size, dna, dna_offset, global_nb_protein, w_max); hipLaunchKernelGGL(( compute_phenotype), dim3(1),dim3(128), 0, 0, protein,global_nb_protein, phenotype, phenotype_activ,phenotype_inhib, nb_indiv); hipLaunchKernelGGL(( compute_metaerror_fitness), dim3(nb_indiv),dim3(300), 0, 0, selection_pressure,phenotype, phenotype_activ,phenotype_inhib, 
target, metaerror, fitness); // SELECTION hipLaunchKernelGGL(( selection), dim3(nb_indiv),dim3(NEIGHBORHOOD_SIZE), 0, 0, fitness,next_generation_reproducer,gpu_counters, grid_width,grid_height,nb_indiv); // GENERATE MUTATION + PREDICT hipLaunchKernelGGL(( generate_mutations), dim3(nb_indiv),dim3(1), 0, 0, gpu_counters,dna_size,nb_mutations,dna_mutator_list, next_generation_reproducer, nb_indiv,mutation_rate); hipLaunchKernelGGL(( compute_tab_mutations_offset), dim3(nb_indiv),dim3(1), 0, 0, nb_mutations,mutations_offset); int total_nb_mutations_host; checkCuda(hipMemcpy(&total_nb_mutations_host, nb_mutations+nb_indiv, sizeof(int), hipMemcpyDeviceToHost)); if (total_nb_mutations_host > current_size_tab_mutation) { checkCuda(hipFree(tab_mutation)); current_size_tab_mutation = total_nb_mutations_host * 1.1; checkCuda(hipMalloc(&tab_mutation,current_size_tab_mutation* sizeof(TypeMutation))); } int min_genome_length_ = 10; int max_genome_length_ = 10000000; hipLaunchKernelGGL(( predict_size_v2), dim3(nb_indiv),dim3(1), 0, 0, dna_size, next_gen_dna_size, dna_mutator_list, tab_mutation,nb_mutations,mutations_offset,gpu_counters,next_generation_reproducer, max_genome_length_,min_genome_length_,nb_indiv); hipDeviceSynchronize(); // DO MUTATION std::vector <size_t> host_dna_size( nb_indiv); checkCuda(hipMemcpy(host_dna_size.data(), next_gen_dna_size, nb_indiv * sizeof(size_t), hipMemcpyDeviceToHost)); global_dna_size=0; for (int i = 0; i < nb_indiv; i++) { global_dna_size += host_dna_size[i]; host_max_dna_size = host_max_dna_size < host_dna_size[i] ? host_dna_size[i] : host_max_dna_size; } bool haveChange = false; if (global_dna_size >= allocated_global_dna_size) { haveChange = true; allocated_global_dna_size = global_dna_size*2; checkCuda(hipMalloc((void **) &next_gen_dna, allocated_global_dna_size * sizeof(char))); checkCuda(hipFree(dna_term)); checkCuda(hipMalloc((void **) &dna_term, allocated_global_dna_size * sizeof(int8_t * ))); checkCuda(hipFree(start_protein)); checkCuda(hipMalloc((void **) &start_protein, allocated_global_dna_size * sizeof(int8_t * ))); } hipLaunchKernelGGL(( compute_next_gen_dna_offset), dim3(nb_indiv),dim3(128), 0, 0, next_gen_dna_size, next_gen_dna_offset); x_dim_size = (host_max_dna_size / 128)+1; y_dim_size = nb_indiv; dim3 dimGrid2(x_dim_size,y_dim_size); hipLaunchKernelGGL(( do_mutation_v2), dim3(dimGrid2),dim3(128), 0, 0, tab_mutation, nb_mutations, dna_size, dna_offset, dna, next_gen_dna, next_gen_dna_size, next_gen_dna_offset,next_generation_reproducer, mutations_offset,nb_mut_bp); //printf("DNA 1 %p\n",dna); //next_generation_dna_read<<<1,1>>>(next_gen_dna, next_gen_dna_offset,next_gen_dna_size, global_dna_size); // SWITCH STRUCTURE int block = ceil(nb_indiv/32); hipLaunchKernelGGL(( do_memset), dim3(block),dim3(32), 0, 0, phenotype_activ,phenotype_inhib,nb_mutations,rna_idx,protein_idx,nb_proteins, nb_promoters,next_gen_dna_size, nb_indiv); //allocate_next_gen(nb_indiv); //printf("DNA 2 %p\n",dna); size_t* tmp_dna_size = dna_size; dna_size = next_gen_dna_size; next_gen_dna_size = tmp_dna_size; size_t* tmp_dna_offset = dna_offset; dna_offset = next_gen_dna_offset; next_gen_dna_offset = tmp_dna_offset; //global_dna_size = global_next_gen_dna_size; hipDeviceSynchronize(); assert(dna!=0); //printf("DNA 3 %p\n",dna); if (haveChange) { checkCuda(hipFree(dna)); checkCuda(hipMalloc((void **) &dna, allocated_global_dna_size * sizeof(char))); } //printf("DNA 4 %p\n",dna); hipDeviceSynchronize(); char* dna_tmp = dna; dna = next_gen_dna; next_gen_dna = dna_tmp; // 
clean(exp_m); } void allocate_next_gen(int nb_indiv) { for (int indiv_id = 0; indiv_id < nb_indiv; indiv_id++) { checkCuda(hipMemset(host_phenotype[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(hipMemset(host_phenotype_activ[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(hipMemset(host_phenotype_inhib[indiv_id], 0.0, 300 * sizeof(double))); } checkCuda(hipMemset(nb_mutations, 0, (nb_indiv+1) * sizeof(int))); checkCuda(hipMemset(mutations_offset, 0, nb_indiv * sizeof(int))); checkCuda(hipMemset(mutations_idx, 0, nb_indiv * sizeof(int))); checkCuda(hipMemset(rna_idx, 0, (nb_indiv+1) * sizeof(int32_t))); checkCuda(hipMemset(rna_offset, 0, nb_indiv * sizeof(int32_t))); checkCuda(hipMemset(protein_idx, 0, (nb_indiv+1) * sizeof(int32_t))); checkCuda(hipMemset(protein_offset, 0, nb_indiv * sizeof(int32_t))); checkCuda(hipMemset(nb_proteins, 0, (nb_indiv+1) * sizeof(int))); checkCuda(hipMemset(nb_promoters, 0, (nb_indiv+1) * sizeof(int))); } __global__ void do_memset(double** phenotype_activ, double** phenotype_inhib, int* nb_mutations, int32_t* rna_idx, int32_t* protein_idx, int* nb_proteins, int* nb_promoters, size_t* dna_size, int nb_indiv) { const int indiv_id = blockIdx.x * blockDim.x + threadIdx.x; if (indiv_id < nb_indiv) { for (int i = 0; i < 300; i++) { phenotype_inhib[indiv_id][i] = 0; phenotype_activ[indiv_id][i] = 0; } rna_idx[indiv_id] = 0; protein_idx[indiv_id] = 0; nb_proteins[indiv_id] = 0; nb_promoters[indiv_id] = 0; if (indiv_id == 0) { nb_mutations[nb_indiv] = 0; rna_idx[nb_indiv] = 0; protein_idx[nb_indiv] = 0; nb_proteins[nb_indiv] = 0; nb_promoters[nb_indiv] = 0; } } }
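The hipified content above tracks the CUDA original (Algorithms.cu, below) essentially token for token; the differences are the API spellings: every kernel<<<grid, block>>>(args) launch becomes a hipLaunchKernelGGL(...) call, and every cuda* runtime call becomes its hip* counterpart. A minimal sketch of that mapping, using a hypothetical scale kernel that belongs to neither file:

#include "hip/hip_runtime.h"

// Hypothetical kernel, for illustration only -- not part of Algorithms.cu.
__global__ void scale(double* v, double f, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= f;
}

void launch_scale(double* d_v, int n) {
    dim3 grid((n + 127) / 128), block(128);
    // CUDA source form, as written throughout Algorithms.cu:
    //     scale<<<grid, block>>>(d_v, 2.0, n);
    // hipify rewrites it to the macro form, making the shared-memory size (0)
    // and stream (0) arguments explicit:
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_v, 2.0, n);
}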
Algorithms.cu
#include "Algorithms.h" #include "Algorithms.cuh" #include "ExpManager.h" #include "ThreefryGPU.h" #include "GPUDna.cuh" #include <cstdint> #include <stdio.h> #include <unistd.h> #include <iostream> #include<cuda.h> #include<cuda_profiler_api.h> using namespace std; #define DEBUG 1 // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } constexpr int32_t PROMOTER_ARRAY_SIZE = 10000; void transfer_in(ExpManager* exp_m, bool first_gen) { exp_m->rng_->initDevice(); std::vector<size_t> host_dna_size(exp_m->nb_indivs_); std::vector<size_t> host_dna_offset(exp_m->nb_indivs_); // Compute sizes: // * global_dna_size // * host_dna_offset[] // * host_max_dna_size // * host_dna_size[] global_dna_size = 0; for (int i = 0; i < exp_m->nb_indivs_; i++) { host_dna_offset[i] = global_dna_size; global_dna_size += exp_m->internal_organisms_[i]->dna_->seq_.size(); host_max_dna_size = host_max_dna_size < exp_m->internal_organisms_[i]->dna_->seq_.size() ? exp_m->internal_organisms_[i]->dna_->seq_.size() : host_max_dna_size; host_dna_size[i] = exp_m->internal_organisms_[i]->dna_->seq_.size(); } // Create shorthands auto seq0 = exp_m->internal_organisms_[0]->dna_->seq_.data(); auto len0 = exp_m->internal_organisms_[0]->dna_->seq_.size(); allocated_global_dna_size = global_dna_size*5; // Allocate mem for the meta dna checkCuda(cudaMalloc((void **) &next_gen_dna, allocated_global_dna_size * sizeof(char))); checkCuda(cudaMalloc((void**) &dna, allocated_global_dna_size * sizeof(char))); // Tranfer **the first** indiv's sequence checkCuda(cudaMemcpy(dna, seq0, len0 * sizeof(char), cudaMemcpyHostToDevice)); // Send dna_size array checkCuda(cudaMalloc((void**) &dna_size, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(cudaMemcpy(dna_size, host_dna_size.data(), exp_m->nb_indivs_ * sizeof(size_t), cudaMemcpyHostToDevice)); checkCuda(cudaMalloc((void **) &nb_mut_bp, 1 * sizeof(unsigned long long int))); checkCuda(cudaMemset(nb_mut_bp, 0, 1 * sizeof(unsigned long long int))); // Launch kernel to clone initial genome into the whole pop int x_dim_size = (len0 / 128)+1; int y_dim_size = exp_m->nb_indivs_; dim3 dimGrid(x_dim_size,y_dim_size); clone_init_indiv<<<dimGrid,128>>>(dna_size, dna); checkCuda(cudaMalloc((void**) &dna_term, allocated_global_dna_size * sizeof(int8_t*))); checkCuda(cudaMalloc((void**) &start_protein, allocated_global_dna_size * sizeof(int8_t*))); checkCuda(cudaMalloc((void**) &dna_offset, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(cudaMemcpy(dna_offset, host_dna_offset.data(), exp_m->nb_indivs_ * sizeof(size_t), cudaMemcpyHostToDevice)); checkCuda(cudaMalloc((void**) &next_gen_dna_offset, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(cudaMalloc((void**) &next_gen_dna_size, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(cudaMalloc((void**) &nb_mutations, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(cudaMemset(nb_mutations, 0, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(cudaMalloc((void**) &mutations_offset, exp_m->nb_indivs_ * sizeof(int))); checkCuda(cudaMemset(mutations_offset, 0, exp_m->nb_indivs_ * sizeof(int))); checkCuda(cudaMalloc((void**) &mutations_idx, exp_m->nb_indivs_ * sizeof(int))); checkCuda(cudaMemset(mutations_idx, 0, exp_m->nb_indivs_ * 
sizeof(int))); checkCuda(cudaMalloc((void**) &dna_mutator_list, exp_m->nb_indivs_ * sizeof(GPUDnaMutator))); current_size_tab_mutation = exp_m->nb_indivs_ * 100; checkCuda(cudaMalloc(&tab_mutation, current_size_tab_mutation * sizeof(TypeMutation))); checkCuda(cudaMalloc((void**) &rna_idx, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda(cudaMemset(rna_idx, 0, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda(cudaMalloc((void**) &rna_offset, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(cudaMemset(rna_offset, 0, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(cudaMalloc((void**) &protein_idx, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda( cudaMemset(protein_idx, 0, (exp_m->nb_indivs_ + 1) * sizeof(int32_t))); checkCuda(cudaMalloc((void**) &protein_offset, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(cudaMemset(protein_offset, 0, exp_m->nb_indivs_ * sizeof(int32_t))); checkCuda(cudaMalloc((void**) &next_generation_reproducer, exp_m->nb_indivs_ * sizeof(size_t))); checkCuda(cudaMalloc((void**) &nb_promoters, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(cudaMemset(nb_promoters, 0, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(cudaMalloc((void**) &nb_proteins, (exp_m->nb_indivs_ + 1) * sizeof(int))); checkCuda(cudaMemset(nb_proteins, 0, (exp_m->nb_indivs_ + 1) * sizeof(int))); host_phenotype = (double**) malloc(exp_m->nb_indivs_ * sizeof(double*)); checkCuda( cudaMalloc((void***) &phenotype, exp_m->nb_indivs_ * sizeof(double*))); host_phenotype_activ = (double**) malloc(exp_m->nb_indivs_ * sizeof(double*)); checkCuda(cudaMalloc((void***) &phenotype_activ, exp_m->nb_indivs_ * sizeof(double*))); host_phenotype_inhib = (double**) malloc(exp_m->nb_indivs_ * sizeof(double*)); checkCuda(cudaMalloc((void***) &phenotype_inhib, exp_m->nb_indivs_ * sizeof(double*))); for (int indiv_id = 0; indiv_id < exp_m->nb_indivs_; indiv_id++) { checkCuda( cudaMalloc((void**) &host_phenotype[indiv_id], 300 * sizeof(double))); checkCuda(cudaMemset(host_phenotype[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(cudaMalloc((void**) &host_phenotype_activ[indiv_id], 300 * sizeof(double))); checkCuda( cudaMemset(host_phenotype_activ[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(cudaMalloc((void**) &host_phenotype_inhib[indiv_id], 300 * sizeof(double))); checkCuda( cudaMemset(host_phenotype_inhib[indiv_id], 0.0, 300 * sizeof(double))); } current_size_rna_list = exp_m->nb_indivs_ * 10000; checkCuda(cudaMalloc(&rna, current_size_rna_list * sizeof(pRNA))); current_size_protein_list = exp_m->nb_indivs_ * 1000; checkCuda(cudaMalloc(&protein, current_size_protein_list * sizeof(pProtein))); checkCuda( cudaMemcpy(phenotype, host_phenotype, exp_m->nb_indivs_ * sizeof(double*), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(phenotype_activ, host_phenotype_activ, exp_m->nb_indivs_ * sizeof(double*), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(phenotype_inhib, host_phenotype_inhib, exp_m->nb_indivs_ * sizeof(double*), cudaMemcpyHostToDevice)); checkCuda(cudaMalloc((void**) &target, 300 * sizeof(double))); double target_host[300]; for (int i = 0; i < 300; i++) { target_host[i] = exp_m->target[i]; } checkCuda(cudaMemcpy(target, target_host, 300 * sizeof(double), cudaMemcpyHostToDevice)); checkCuda(cudaMalloc((void**) &metaerror, exp_m->nb_indivs_ * sizeof(double))); checkCuda(cudaMalloc((void**) &fitness, exp_m->nb_indivs_ * sizeof(double))); //printf("GPU Counter %d\n",exp_m->rng_->counters().size()); checkCuda(cudaMalloc((void**) &gpu_counters, exp_m->rng_->counters().size() * 
sizeof(unsigned long long))); checkCuda(cudaMemcpy(gpu_counters, exp_m->rng_->counters().data(), exp_m->rng_->counters().size() * sizeof(unsigned long long), cudaMemcpyHostToDevice)); } __global__ // Copy first indiv's dna into all the other indivs' dna void clone_init_indiv(size_t* dna_size, char* dna) { int dna_chunk_idx = blockIdx.x; int indiv_id = blockIdx.y; if(indiv_id == 0) return; // don't copy indiv 0 onto itself int pos = (dna_chunk_idx*128)+threadIdx.x; if (pos < dna_size[0]) { dna[indiv_id*dna_size[0] + pos] = dna[pos]; } } __global__ void search_start_stop_RNA(size_t* dna_size, char* dna, size_t* dna_offset, int* nb_promoters, int8_t* dna_term, int nb_indivs, int global_dna_size, unsigned long long* nb_mut_bp) { int dna_pos_block = blockIdx.x; int indiv_id = blockIdx.y; int dna_pos = (dna_pos_block*128)+threadIdx.x; __shared__ int nb_prom_block; if (threadIdx.x == 0) { nb_prom_block = 0; } __syncthreads(); if (dna_pos < dna_size[indiv_id] && dna_size[indiv_id] >= PROM_SIZE) { dna_term[dna_offset[indiv_id]+dna_pos] = 22; //atomicAdd(nb_mut_bp,1); int prom_dist[22]; int term_dist[4]; for (int motif_id = 0; motif_id < 26; motif_id++) { if (motif_id < 22) { prom_dist[motif_id] = PROM_SEQ[motif_id] == dna[dna_pos + motif_id >= dna_size[indiv_id] ? dna_offset[indiv_id]+ dna_pos + motif_id - dna_size[indiv_id] : dna_offset[indiv_id]+ dna_pos + motif_id] ? 0 : 1; } else if (motif_id >= 22) { int t_motif_id = motif_id - 22; term_dist[t_motif_id] = dna[dna_pos + t_motif_id >= dna_size[indiv_id] ? dna_offset[indiv_id]+dna_pos + t_motif_id - dna_size[indiv_id] : dna_offset[indiv_id]+ dna_pos + t_motif_id] != dna[dna_pos - t_motif_id + 10 >= dna_size[indiv_id] ? dna_offset[indiv_id]+ dna_pos - t_motif_id + 10 - dna_size[indiv_id] : dna_offset[indiv_id]+ dna_pos - t_motif_id + 10] ? 1 : 0; } } int8_t dist_prom = prom_dist[0] + prom_dist[1] + prom_dist[2] + prom_dist[3] + prom_dist[4] + prom_dist[5] + prom_dist[6] + prom_dist[7] + prom_dist[8] + prom_dist[9] + prom_dist[10] + prom_dist[11] + prom_dist[12] + prom_dist[13] + prom_dist[14] + prom_dist[15] + prom_dist[16] + prom_dist[17] + prom_dist[18] + prom_dist[19] + prom_dist[20] + prom_dist[21]; dna_term[dna_offset[indiv_id]+dna_pos] = dist_prom; if (dist_prom <= 4) { int rna_idx = atomicAdd(&nb_prom_block, 1); } int dist_term = term_dist[0] + term_dist[1] + term_dist[2] + term_dist[3]; dna_term[dna_offset[indiv_id]+dna_pos] |= dist_term == 4 ? 
1<<7 : 0; } __syncthreads(); if (threadIdx.x == 0) { atomicAdd(nb_promoters+indiv_id,nb_prom_block); atomicAdd(nb_promoters+nb_indivs,nb_prom_block); } } __global__ void compute_RNA_offset(int* nb_promoters, int* rna_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_rna_offset; if (threadIdx.x == 0) { grid_rna_offset = 0; } __syncthreads(); { int local_rna_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_rna_offset += nb_promoters[cpt]; } if (local_rna_offset > 0) atomicAdd(&grid_rna_offset, local_rna_offset); } __syncthreads(); if (threadIdx.x == 0) { rna_offset[indiv_id] = grid_rna_offset; } } __global__ void fill_RNA( int8_t* dna_term, size_t* dna_size, size_t* dna_offset, int* nb_promoters, int* rna_offset, pRNA* rnas, int32_t* rna_idx, int nb_indiv) { int dna_pos_block = blockIdx.x; int indiv_id = blockIdx.y; int dna_pos = (dna_pos_block * 128) + threadIdx.x; if (dna_pos < dna_size[indiv_id] && dna_size[indiv_id] >= PROM_SIZE) { // Masque le bit de poid fort int8_t dist = dna_term[dna_offset[indiv_id]+dna_pos] & (0x7F); if (dist <= 4) { int local_rna_idx = atomicAdd(rna_idx + indiv_id, 1); atomicAdd(rna_idx + nb_indiv, 1); rnas[rna_offset[indiv_id] + local_rna_idx].begin = dna_pos; rnas[rna_offset[indiv_id] + local_rna_idx].dist = dist; rnas[rna_offset[indiv_id] + local_rna_idx].transcribed = false; rnas[rna_offset[indiv_id] + local_rna_idx].indiv_id = indiv_id; } } } __global__ void compute_RNA( int8_t* dna_term, size_t* dna_size, size_t* dna_offset, pRNA* rnas, int global_nb_rna) { const int globalIdx = blockIdx.x*blockDim.x+threadIdx.x; if (globalIdx < global_nb_rna ) { int indiv_id = rnas[globalIdx].indiv_id; if (dna_size[indiv_id] >= PROM_SIZE) { int k = rnas[globalIdx].begin + 22; k = k >= dna_size[indiv_id] ? k - dna_size[indiv_id] : k; int k_end = k; bool found=false; do { //printf("%d -- %d %ld\n",indiv_id,k,dna_size[indiv_id]); if (dna_term[dna_offset[indiv_id]+k] & (1<<7)) { int32_t rna_end = k + 10 >= dna_size[indiv_id] ? k + 10 - dna_size[indiv_id] : k + 10; int32_t rna_length = 0; if (rnas[globalIdx].begin > rna_end) rna_length = dna_size[indiv_id] - rnas[globalIdx].begin + rna_end; else rna_length = rna_end - rnas[globalIdx].begin; if (rna_length < 19) { rnas[globalIdx].begin = 0; rnas[globalIdx].end = 0; rnas[globalIdx].length = 0; rnas[globalIdx].transcribed = false; break; } rnas[globalIdx].end = rna_end; rnas[globalIdx].transcribed = true; rnas[globalIdx].length = rna_length; if (rnas[globalIdx].end>=dna_size[indiv_id]) { printf("Termin %d %d S %d %ld\n", rnas[globalIdx].begin, rnas[globalIdx].end,indiv_id,dna_size[indiv_id]); //assert(rnas[globalIdx].end<dna_size[indiv_id]); } found=true; break; } k++; k = k >= dna_size[indiv_id] ? 
k - dna_size[indiv_id] : k; } while (k != k_end); } } else { rnas[globalIdx].begin = 0; rnas[globalIdx].end = 0; rnas[globalIdx].length = 0; rnas[globalIdx].transcribed = false; } } __global__ void display_RNA( pRNA* rna, size_t* dna_size, int32_t global_nb_rna) { for(int i = 0; i < global_nb_rna; i++) { if (rna[i].transcribed) if (rna[i].end>=dna_size[rna[i].indiv_id]) { printf("UIIH %d %d S %d -- %ld\n",rna[i].begin,rna[i].end,rna[i].indiv_id,dna_size[rna[i].indiv_id]); } } } __global__ void compute_start_protein(int8_t* start_protein, size_t* dna_size, size_t* dna_offset, pRNA* rna, char* dna, int32_t* nb_proteins, int32_t global_nb_rna, int nb_indiv) { const int globalIdx = blockIdx.x*blockDim.x+threadIdx.x; int nb_prot = 0; if (globalIdx < global_nb_rna) { if (rna[globalIdx].transcribed) { const int indiv_id = rna[globalIdx].indiv_id; int c_pos = rna[globalIdx].begin; if (rna[globalIdx].length > 22) { c_pos += 22; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; int count_loop=0; if (rna[globalIdx].end>=dna_size[indiv_id]) { printf("ator %d S %d\n",rna[globalIdx].end,indiv_id); assert(rna[globalIdx].end<dna_size[indiv_id]); } while (c_pos != rna[globalIdx].end) { //if (indiv_id==606) printf("%d -- %d %d\n",indiv_id,c_pos,rna[globalIdx].end); bool start = false; int t_pos, k_t; for (int k = 0; k < 9; k++) { k_t = k >= 6 ? k + 4 : k; t_pos = c_pos + k_t >= dna_size[indiv_id] ? c_pos + k_t - dna_size[indiv_id] : c_pos + k_t; count_loop++; if (count_loop>10000) {printf("%d %d %d %d %d %d %d %ld\n",indiv_id,globalIdx,k, c_pos,t_pos, rna[globalIdx].begin,rna[globalIdx].end, dna_size[indiv_id]);assert(0);} if (dna[dna_offset[indiv_id]+t_pos] == SHINE_DAL_SEQ[k]) { start = true; } else { start = false; break; } } start_protein[dna_offset[indiv_id]+c_pos] = start; if (start) { nb_prot++; } c_pos++; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; } } atomicAdd(nb_proteins+indiv_id,nb_prot); atomicAdd(nb_proteins+nb_indiv,nb_prot); } } } __global__ void compute_protein_offset(int32_t* nb_proteins, int* protein_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_protein_offset; if (threadIdx.x == 0) { grid_protein_offset = 0; } __syncthreads(); { int local_protein_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_protein_offset += nb_proteins[cpt]; } if (local_protein_offset > 0) atomicAdd(&grid_protein_offset, local_protein_offset); } __syncthreads(); if (threadIdx.x == 0) { protein_offset[indiv_id] = grid_protein_offset; } } __global__ void fill_protein(int8_t* start_protein, size_t* dna_offset, int* protein_idx, int* protein_offset, pRNA* rna, pProtein* protein, size_t* dna_size, int32_t global_nb_rna, int nb_indiv) { const int globalIdx = blockIdx.x*blockDim.x+threadIdx.x; if (globalIdx < global_nb_rna) { if (rna[globalIdx].transcribed) { int indiv_id = rna[globalIdx].indiv_id; int c_pos = rna[globalIdx].begin; if (rna[globalIdx].length > 22) { c_pos += 22; c_pos = c_pos >= dna_size[indiv_id] ? 
c_pos - dna_size[indiv_id] : c_pos; while (c_pos != rna[globalIdx].end) { if (start_protein[dna_offset[indiv_id]+c_pos] == 1) { int local_protein_idx = atomicAdd(protein_idx + indiv_id, 1); atomicAdd(protein_idx + nb_indiv, 1); protein[protein_offset[indiv_id] + local_protein_idx].protein_start = c_pos; protein[protein_offset[indiv_id] + local_protein_idx].indiv_id = rna[globalIdx].indiv_id; protein[protein_offset[indiv_id] + local_protein_idx].stop_RNA = rna[globalIdx].end; protein[protein_offset[indiv_id] + local_protein_idx].translated = false; protein[protein_offset[indiv_id] + local_protein_idx].e = 1.0 - fabs(((double) rna[globalIdx].dist)) / 5.0; } c_pos++; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; } } } } } __global__ void compute_proteins( int8_t* start_protein, size_t* dna_size, size_t* dna_offset, pProtein* protein, char* dna, int32_t global_nb_protein) { __shared__ int next_protein_idx; if (threadIdx.x == 0) { next_protein_idx = 0; } __syncthreads(); int local_protein_idx = atomicAdd(&next_protein_idx,1); while (local_protein_idx < global_nb_protein) { int indiv_id = protein[local_protein_idx].indiv_id; int start_protein_pos = protein[local_protein_idx].protein_start + 13; int length = -1; start_protein_pos = start_protein_pos >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] : start_protein_pos; if (protein[local_protein_idx].protein_start < protein[local_protein_idx].stop_RNA) { length = protein[local_protein_idx].stop_RNA - protein[local_protein_idx].protein_start; } else { length = dna_size[indiv_id] - protein[local_protein_idx].protein_start + protein[local_protein_idx].stop_RNA + 1; } length -= 13; bool is_protein = false; length+=1; length = length - (length%3); for (int loop_i = 0; length - loop_i >= 2; loop_i+=3) { int t_k; start_protein_pos = start_protein_pos >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] : start_protein_pos; is_protein = false; for (int k = 0; k < 3; k++) { t_k = start_protein_pos + k >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] + k : start_protein_pos + k; if (dna[dna_offset[indiv_id]+t_k] == PROTEIN_END[k]) { is_protein = true; } else { is_protein = false; break; } } if (is_protein) { int prot_length = -1; if (protein[local_protein_idx].protein_start + 13 < t_k) { prot_length = t_k - (protein[local_protein_idx].protein_start + 13); } else { prot_length = dna_size[indiv_id] - (protein[local_protein_idx].protein_start + 13) + t_k; } if (prot_length >= 3) { protein[local_protein_idx].protein_end = t_k; protein[local_protein_idx].protein_length = prot_length; protein[local_protein_idx].translated = true; } break; } start_protein_pos += 3; start_protein_pos = start_protein_pos >= dna_size[indiv_id] ? start_protein_pos - dna_size[indiv_id] : start_protein_pos; } local_protein_idx = atomicAdd(&next_protein_idx,1); } } __global__ void translate_proteins( pProtein* protein, size_t* dna_size, char* dna, size_t* dna_offset, int32_t global_nb_protein, double w_max) { __shared__ int next_protein_idx; if (threadIdx.x == 0) { next_protein_idx = 0; } __syncthreads(); int local_protein_idx = atomicAdd(&next_protein_idx,1); while (local_protein_idx < global_nb_protein) { int indiv_id = protein[local_protein_idx].indiv_id; if (protein[local_protein_idx].translated) { int c_pos = protein[local_protein_idx].protein_start, t_pos; int end_pos = protein[local_protein_idx].protein_end; c_pos += 13; end_pos -= 3; c_pos = c_pos >= dna_size[indiv_id] ? 
c_pos - dna_size[indiv_id] : c_pos; end_pos = end_pos < 0 ? dna_size[indiv_id] + end_pos : end_pos; int8_t value = 0; int8_t codon_list[64] = {}; int8_t codon_idx = 0; int32_t count_loop = 0; bool contin = true; while (count_loop < protein[local_protein_idx].protein_length / 3 && codon_idx < 64) { value = 0; for (int8_t i = 0; i < 3; i++) { t_pos = c_pos + i >= dna_size[indiv_id] ? c_pos + i - dna_size[indiv_id] : c_pos + i; if (dna[dna_offset[indiv_id]+t_pos] == '1') value += 1 << (CODON_SIZE - i - 1); } codon_list[codon_idx] = value; codon_idx++; count_loop++; c_pos += 3; c_pos = c_pos >= dna_size[indiv_id] ? c_pos - dna_size[indiv_id] : c_pos; } double M = 0.0; double W = 0.0; double H = 0.0; int32_t nb_m = 0; int32_t nb_w = 0; int32_t nb_h = 0; bool bin_m = false; // Initializing to false will yield a conservation of the high weight bit bool bin_w = false; // when applying the XOR operator for the Gray to standard conversion bool bin_h = false; for (int i = 0; i < codon_idx; i++) { switch (codon_list[i]) { case CODON_M0 : { // M codon found nb_m++; // Convert Gray code to "standard" binary code bin_m ^= false; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ M <<= 1; M *= 2; // Add this nucleotide's contribution to M if (bin_m) M += 1; break; } case CODON_M1 : { // M codon found nb_m++; // Convert Gray code to "standard" binary code bin_m ^= true; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest bit was found, make a left bitwise shift //~ M <<= 1; M *= 2; // Add this nucleotide's contribution to M if (bin_m) M += 1; break; } case CODON_W0 : { // W codon found nb_w++; // Convert Gray code to "standard" binary code bin_w ^= false; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ W <<= 1; W *= 2; // Add this nucleotide's contribution to W if (bin_w) W += 1; break; } case CODON_W1 : { // W codon found nb_w++; // Convert Gray code to "standard" binary code bin_w ^= true; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ W <<= 1; W *= 2; // Add this nucleotide's contribution to W if (bin_w) W += 1; break; } case CODON_H0 : case CODON_START : // Start codon codes for the same amino-acid as H0 codon { // H codon found nb_h++; // Convert Gray code to "standard" binary code bin_h ^= false; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ H <<= 1; H *= 2; // Add this nucleotide's contribution to H if (bin_h) H += 1; break; } case CODON_H1 : { // H codon found nb_h++; // Convert Gray code to "standard" binary code bin_h ^= true; // as bin_m was initialized to false, the XOR will have no effect on the high weight bit // A lower-than-the-previous-lowest weight bit was found, make a left bitwise shift //~ H <<= 1; H *= 2; // Add this nucleotide's contribution to H if (bin_h) H += 1; break; } } } // ---------------------------------------------------------------------------------- // 2) Normalize M, W and H values in [0;1] according to number of codons of each kind // 
---------------------------------------------------------------------------------- protein[local_protein_idx].m = nb_m != 0 ? M / (pow(2, nb_m) - 1) : 0.5; protein[local_protein_idx].w = nb_w != 0 ? W / (pow(2, nb_w) - 1) : 0.0; protein[local_protein_idx].h = nb_h != 0 ? H / (pow(2, nb_h) - 1) : 0.5; // ------------------------------------------------------------------------------------ // 3) Normalize M, W and H values according to the allowed ranges (defined in macros.h) // ------------------------------------------------------------------------------------ // x_min <= M <= x_max // w_min <= W <= w_max // h_min <= H <= h_max protein[local_protein_idx].m = (X_MAX - X_MIN) * protein[local_protein_idx].m + X_MIN; protein[local_protein_idx].w = (w_max - W_MIN) * protein[local_protein_idx].w + W_MIN; protein[local_protein_idx].h = (H_MAX - H_MIN) * protein[local_protein_idx].h + H_MIN; if (nb_m == 0 || nb_w == 0 || nb_h == 0 || protein[local_protein_idx].w == 0.0 || protein[local_protein_idx].h == 0.0) { protein[local_protein_idx].is_functional = false; } else { protein[local_protein_idx].is_functional = true; } } local_protein_idx = atomicAdd(&next_protein_idx,1); } } __global__ void compute_phenotype( pProtein* protein, int32_t global_nb_protein, double** phenotype, double** phenotype_activ, double** phenotype_inhib, int nb_indiv) { __shared__ int next_protein_idx; if (threadIdx.x == 0) { next_protein_idx = 0; } __syncthreads(); int local_protein_idx = atomicAdd(&next_protein_idx,1); while (local_protein_idx < global_nb_protein) { int indiv_id = protein[local_protein_idx].indiv_id; if (protein[local_protein_idx].translated) { if (fabs(protein[local_protein_idx].w) < 1e-15 || fabs(protein[local_protein_idx].h) < 1e-15) { } else { if (protein[local_protein_idx].is_functional) { // Compute triangle points' coordinates double x0 = protein[local_protein_idx].m - protein[local_protein_idx].w; double x1 = protein[local_protein_idx].m; double x2 = protein[local_protein_idx].m + protein[local_protein_idx].w; int ix0 = (int) (x0 * 300); int ix1 = (int) (x1 * 300); int ix2 = (int) (x2 * 300); if (ix0 < 0) ix0 = 0; else if (ix0 > (299)) ix0 = 299; if (ix1 < 0) ix1 = 0; else if (ix1 > (299)) ix1 = 299; if (ix2 < 0) ix2 = 0; else if (ix2 > (299)) ix2 = 299; // Compute the first equation of the triangle double incY = (protein[local_protein_idx].h * protein[local_protein_idx].e) / (ix1 - ix0); int count = 1; // Updating value between x0 and x1 for (int i = ix0 + 1; i < ix1; i++) { if (protein[local_protein_idx].h > 0) atomicAdd(&phenotype_activ[indiv_id][i], (incY * (count++))); else atomicAdd(&phenotype_inhib[indiv_id][i], (incY * (count++))); } if (protein[local_protein_idx].h > 0) { atomicAdd(&phenotype_activ[indiv_id][ix1], (protein[local_protein_idx].h * protein[local_protein_idx].e)); } else atomicAdd(&phenotype_inhib[indiv_id][ix1], (protein[local_protein_idx].h * protein[local_protein_idx].e)); // Compute the second equation of the triangle incY = (protein[local_protein_idx].h * protein[local_protein_idx].e) / (ix2 - ix1); count = 1; // Updating value between x1 and x2 for (int i = ix1 + 1; i < ix2; i++) { if (protein[local_protein_idx].h > 0) atomicAdd(&phenotype_activ[indiv_id][i], ((protein[local_protein_idx].h * protein[local_protein_idx].e) - (incY * (count++)))); else atomicAdd(&phenotype_inhib[indiv_id][i], ((protein[local_protein_idx].h * protein[local_protein_idx].e) - (incY * (count++)))); } } } } local_protein_idx = atomicAdd(&next_protein_idx,1); } __syncthreads(); } __global__ void 
compute_metaerror_fitness(double selection_pressure,double** phenotype, double** phenotype_activ,double** phenotype_inhib, double* target, double* metaerror, double* fitness) { int indiv_id = blockIdx.x; int fuzzy_idx = threadIdx.x; if (phenotype_activ[indiv_id][fuzzy_idx] > 1.0) phenotype_activ[indiv_id][fuzzy_idx] = 1.0; if (phenotype_inhib[indiv_id][fuzzy_idx] < -1.0) phenotype_inhib[indiv_id][fuzzy_idx] = -1.0; phenotype[indiv_id][fuzzy_idx] = phenotype_activ[indiv_id][fuzzy_idx] + phenotype_inhib[indiv_id][fuzzy_idx]; __shared__ double delta[300]; if (phenotype[indiv_id][fuzzy_idx] > 1) phenotype[indiv_id][fuzzy_idx] = 1; if (phenotype[indiv_id][fuzzy_idx] < 0) phenotype[indiv_id][fuzzy_idx] = 0; delta[fuzzy_idx] = phenotype[indiv_id][fuzzy_idx] - target[fuzzy_idx]; __syncthreads(); if (threadIdx.x == 0) { metaerror[indiv_id] = 0; for (int i = 0; i < 299; i++) { metaerror[indiv_id] += ((fabs(delta[i]) + fabs(delta[i + 1])) / (600.0)); } fitness[indiv_id] = exp( -selection_pressure * ((double)metaerror[indiv_id])); } } __device__ int32_t Threefry::Device::roulette_random(double* probs, int32_t nb_elts) { double pick_one = 0.0; while (pick_one == 0.0) { pick_one = randomDouble(); } int32_t found_org = 0; pick_one -= probs[0]; while (pick_one > 0) { assert(found_org<nb_elts-1); pick_one -= probs[++found_org]; } return found_org; } __global__ void selection(double* fitness, int* next_generation_reproducer, unsigned long long* gpu_counters, int grid_width, int grid_height, int nb_indiv) { int indiv_id = blockIdx.x; int neightbor = threadIdx.x; __shared__ double local_fit_array[NEIGHBORHOOD_SIZE]; __shared__ double probs[NEIGHBORHOOD_SIZE]; __shared__ int count; __shared__ double sum_local_fit; int32_t x = indiv_id / grid_height; int32_t y = indiv_id % grid_height; int cur_x,cur_y; if (threadIdx.x == 0) { count = 0; sum_local_fit = 0.0; } __syncthreads(); if (threadIdx.x == 0) { for (int8_t i = -1; i < SELECTION_SCOPE_X - 1; i++) { for (int8_t j = -1; j < SELECTION_SCOPE_Y - 1; j++) { cur_x = (x + i + grid_width) % grid_width; cur_y = (y + j + grid_height) % grid_height; local_fit_array[count] = fitness[cur_x * grid_height + cur_y]; atomicAdd(&sum_local_fit, local_fit_array[count]); count++; } } } __syncthreads(); //for(int16_t i = 0 ; i < NEIGHBORHOOD_SIZE ; i++) { probs[neightbor] = local_fit_array[neightbor]/sum_local_fit; __syncthreads(); if (threadIdx.x == 0) { Threefry::Device rng(gpu_counters,indiv_id,Threefry::Phase::REPROD,nb_indiv); int found_org = rng.roulette_random(probs, NEIGHBORHOOD_SIZE); int x_offset = (found_org / SELECTION_SCOPE_X) - 1; int y_offset = (found_org % SELECTION_SCOPE_Y) - 1; next_generation_reproducer[indiv_id] = ((x + x_offset + grid_width) % grid_width) * grid_height + ((y + y_offset + grid_height) % grid_height); } } __constant__ double cof[6] = { 76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5 }; // Returns the value ln[gamma(X)] for X. // The gamma function is defined by the integral gamma(z) = int(0, +inf, t^(z-1).e^(-t)dt). // When the argument z is an integer, the gamma function is just the familiar factorial // function, but offset by one, n! = gamma(n + 1). 
__device__ static double gammln(double X) { double x, y, tmp, ser; y = x = X; tmp = x + 5.5; tmp -= (x+0.5) * log(tmp); ser = 1.000000000190015; for (int8_t j = 0 ; j <= 5 ; j++) { ser += cof[j] / ++y; } return -tmp + log(2.5066282746310005 * ser / x); } __device__ int32_t Threefry::Device::binomial_random(int32_t nb_drawings, double prob) { int32_t nb_success; // The binomial distribution is invariant under changing // ProbSuccess to 1-ProbSuccess, if we also change the answer to // NbTrials minus itself; we ll remember to do this below. double p; if (prob <= 0.5) p = prob; else p = 1.0 - prob; // mean of the deviate to be produced double mean = nb_drawings * p; if (nb_drawings < 25) // Use the direct method while NbTrials is not too large. // This can require up to 25 calls to the uniform random. { nb_success = 0; for (int32_t j = 1 ; j <= nb_drawings ; j++) { if (randomDouble() < p) nb_success++; } } else if (mean < 1.0) // If fewer than one event is expected out of 25 or more trials, // then the distribution is quite accurately Poisson. Use direct Poisson method. { double g = exp(-mean); double t = 1.0; int32_t j; for (j = 0; j <= nb_drawings ; j++) { t = t * randomDouble(); if (t < g) break; } if (j <= nb_drawings) nb_success = j; else nb_success = nb_drawings; } else // Use the rejection method. { double en = nb_drawings; double oldg = gammln(en + 1.0); double pc = 1.0 - p; double plog = log(p); double pclog = log(pc); // rejection method with a Lorentzian comparison function. double sq = sqrt(2.0 * mean * pc); double angle, y, em, t; do { do { angle = M_PI * randomDouble(); y = tan(angle); em = sq*y + mean; } while (em < 0.0 || em >= (en + 1.0)); // Reject. em = floor(em); // Trick for integer-valued distribution. t = 1.2 * sq * (1.0 + y*y) * exp(oldg - gammln(em + 1.0) - gammln(en - em + 1.0) + em * plog + (en - em) * pclog); } while (randomDouble() > t); // Reject. This happens about 1.5 times per deviate, on average. nb_success = (int32_t) rint(em); } // Undo the symmetry transformation. if (p != prob) nb_success = nb_drawings - nb_success; return nb_success; } __global__ void generate_mutations(unsigned long long* gpu_counters, size_t* dna_size, int* nb_mutations, GPUDnaMutator* dna_mutator_list,int* next_generation_reproducer, int nb_indivs, double mutation_rate) { int indiv_id = blockIdx.x; Threefry::Device rng(gpu_counters,indiv_id,Threefry::Phase::MUTATION,nb_indivs); double mutation_r = mutation_rate; int prev_gen_id = next_generation_reproducer[indiv_id]; size_t prev_gen_size = dna_size[prev_gen_id]; // Small mutations dna_mutator_list[indiv_id].nb_swi_ = rng. 
binomial_random(prev_gen_size, mutation_r); dna_mutator_list[indiv_id].nb_mut_ = dna_mutator_list[indiv_id].nb_swi_; dna_mutator_list[indiv_id].cpt_mut_ = dna_mutator_list[indiv_id].nb_mut_; nb_mutations[indiv_id] = dna_mutator_list[indiv_id].nb_mut_; atomicAdd(nb_mutations+nb_indivs,nb_mutations[indiv_id]); } __global__ void compute_tab_mutations_offset(int* nb_mutations, int* mutations_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_mutation_offset; if (threadIdx.x == 0) { grid_mutation_offset = 0; } __syncthreads(); { int local_mutation_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_mutation_offset += nb_mutations[cpt]; } if (local_mutation_offset > 0) atomicAdd(&grid_mutation_offset, local_mutation_offset); } __syncthreads(); if (threadIdx.x == 0) { mutations_offset[indiv_id] = grid_mutation_offset; } } __device__ static int mod(int a, int b) { assert(b > 0); while (a < 0) a += b; while (a >= b) a -= b; return a; } __global__ void predict_size_v2(size_t* dna_size, size_t* next_gen_dna_size, GPUDnaMutator* dna_mutator_list, TypeMutation* tab_mut, int* nb_mutations, int* mutations_offset, unsigned long long* gpu_counters,int* next_generation_reproducer, int max_genome_length, int min_genome_length, int nb_indiv) { const int indiv_id = blockIdx.x; int random_value; int transient_size = dna_size[next_generation_reproducer[indiv_id]]; Threefry::Device rng(gpu_counters,indiv_id,Threefry::Phase::MUTATION,nb_indiv); for (int mut_idx = 0; mut_idx < dna_mutator_list[indiv_id].nb_mut_; mut_idx++) { dna_mutator_list[indiv_id].cpt_mut_--; dna_mutator_list[indiv_id].nb_swi_--; int pos = rng.random(transient_size); tab_mut[mutations_offset[indiv_id]+mut_idx].type_ = MutationEventType::DO_SWITCH; tab_mut[mutations_offset[indiv_id]+mut_idx].pos_1_ = pos; } next_gen_dna_size[indiv_id] = transient_size; } __global__ void display_mut(TypeMutation* tab_mut, int* nb_mutations, int* mutations_offset) { for (int indiv_id = 0; indiv_id < 25; indiv_id++) { printf("nb mut %d : %d %d\n",indiv_id,nb_mutations[indiv_id],mutations_offset[indiv_id]); for (int i = 0; i < nb_mutations[indiv_id]; i++) { printf("%d -- %d %d %d %d %d %c%c%c%c%c%c %d\n", i, tab_mut[mutations_offset[indiv_id] + i].type_, tab_mut[mutations_offset[indiv_id] + i].pos_1_, tab_mut[mutations_offset[indiv_id] + i].pos_2_, tab_mut[mutations_offset[indiv_id] + i].pos_3_, tab_mut[mutations_offset[indiv_id] + i].number_, tab_mut[mutations_offset[indiv_id] + i].seq[0], tab_mut[mutations_offset[indiv_id] + i].seq[1], tab_mut[mutations_offset[indiv_id] + i].seq[2], tab_mut[mutations_offset[indiv_id] + i].seq[3], tab_mut[mutations_offset[indiv_id] + i].seq[4], tab_mut[mutations_offset[indiv_id] + i].seq[5], tab_mut[mutations_offset[indiv_id] + i].transient_size); } } } __global__ void compute_next_gen_dna_offset(size_t* next_gen_dna_size, size_t* next_gen_dna_offset) { const int indiv_id = blockIdx.x; __shared__ int grid_dna_offset; if (threadIdx.x == 0) { grid_dna_offset = 0; } __syncthreads(); { int local_dna_offset = 0; for (int cpt = threadIdx.x; cpt < indiv_id; cpt += blockDim.x) { local_dna_offset += next_gen_dna_size[cpt]; } if (local_dna_offset > 0) atomicAdd(&grid_dna_offset, local_dna_offset); } __syncthreads(); if (threadIdx.x == 0) { next_gen_dna_offset[indiv_id] = grid_dna_offset; } } __global__ void do_mutation_v2(TypeMutation* tab_mut, int* nb_mutations, size_t* dna_size, size_t* dna_offset, char* dna, char* next_gen_dna, size_t* next_gen_dna_size, size_t* next_gen_dna_offset, int* 
next_generation_reproducer, int* mutations_offset, unsigned long long int* nb_mut_bp) { int dna_pos_block = blockIdx.x; int indiv_id = blockIdx.y; int32_t locus = (dna_pos_block*128)+threadIdx.x; int32_t next_locus = locus; if (locus < next_gen_dna_size[indiv_id]) { int8_t mutate = 0; int nb_events = nb_mutations[indiv_id]; for (; nb_events > 0; nb_events--) { auto &mut = tab_mut[mutations_offset[indiv_id]+nb_events - 1]; switch (mut.type_) { case DO_SWITCH: if (locus == mut.pos_1_) mutate = not mutate; break; } } assert(locus >= 0); assert(locus < dna_size[next_generation_reproducer[indiv_id]]); auto base = dna[dna_offset[next_generation_reproducer[indiv_id]]+locus]; if (mutate) base = (base == '0') ? '1' : '0'; next_gen_dna[next_gen_dna_offset[indiv_id]+next_locus] = base; } } void run_a_step_on_GPU(int nb_indiv, double w_max, double selection_pressure, int grid_width, int grid_height, double mutation_rate) { int x_dim_size = (host_max_dna_size / 128)+1; int y_dim_size = nb_indiv; dim3 dimGrid(x_dim_size,y_dim_size); search_start_stop_RNA<<<dimGrid,128>>>(dna_size,dna,dna_offset, nb_promoters,dna_term,nb_indiv,global_dna_size,nb_mut_bp); int total_nb_promoters_host; checkCuda(cudaMemcpy(&total_nb_promoters_host, nb_promoters+nb_indiv, sizeof(int), cudaMemcpyDeviceToHost)); if (total_nb_promoters_host > current_size_rna_list) { checkCuda(cudaFree(rna)); current_size_rna_list = total_nb_promoters_host * 1.1; checkCuda(cudaMalloc(&rna,current_size_rna_list* sizeof(pRNA))); } compute_RNA_offset<<<nb_indiv,128>>>(nb_promoters,rna_offset); fill_RNA<<<dimGrid,128>>>( dna_term, dna_size,dna_offset, nb_promoters, rna_offset, rna, rna_idx,nb_indiv); int global_nb_rna; checkCuda(cudaMemcpy(&global_nb_rna, rna_idx+nb_indiv, sizeof(int), cudaMemcpyDeviceToHost)); compute_RNA<<<global_nb_rna/128+1,128>>>( dna_term,dna_size, dna_offset, rna, global_nb_rna); cudaDeviceSynchronize(); compute_start_protein<<<global_nb_rna,1>>>(start_protein, dna_size, dna_offset, rna, dna, nb_proteins, global_nb_rna, nb_indiv); cudaDeviceSynchronize(); int total_nb_protein_host; checkCuda(cudaMemcpy(&total_nb_protein_host, nb_proteins+nb_indiv, sizeof(int), cudaMemcpyDeviceToHost)); if (total_nb_protein_host > current_size_protein_list) { checkCuda(cudaFree(protein)); current_size_protein_list = total_nb_protein_host * 1.1; checkCuda(cudaMalloc(&protein,current_size_protein_list* sizeof(pProtein))); } compute_protein_offset<<<nb_indiv,128>>>(nb_proteins, protein_offset); fill_protein<<<global_nb_rna/128+1,128>>>(start_protein,dna_offset, protein_idx, protein_offset, rna, protein, dna_size, global_nb_rna, nb_indiv); int global_nb_protein; checkCuda(cudaMemcpy(&global_nb_protein, protein_idx+nb_indiv, sizeof(int), cudaMemcpyDeviceToHost)); compute_proteins<<<1,128>>>( start_protein, dna_size, dna_offset,protein, dna, global_nb_protein); translate_proteins<<<1,128>>>( protein, dna_size, dna, dna_offset, global_nb_protein, w_max); compute_phenotype<<<1,128>>>( protein,global_nb_protein, phenotype, phenotype_activ,phenotype_inhib, nb_indiv); compute_metaerror_fitness<<<nb_indiv,300>>>(selection_pressure,phenotype, phenotype_activ,phenotype_inhib, target, metaerror, fitness); // SELECTION selection<<<nb_indiv,NEIGHBORHOOD_SIZE>>>(fitness,next_generation_reproducer,gpu_counters, grid_width,grid_height,nb_indiv); // GENERATE MUTATION + PREDICT generate_mutations<<<nb_indiv,1>>>(gpu_counters,dna_size,nb_mutations,dna_mutator_list, next_generation_reproducer, nb_indiv,mutation_rate); 
compute_tab_mutations_offset<<<nb_indiv,1>>>(nb_mutations,mutations_offset); int total_nb_mutations_host; checkCuda(cudaMemcpy(&total_nb_mutations_host, nb_mutations+nb_indiv, sizeof(int), cudaMemcpyDeviceToHost)); if (total_nb_mutations_host > current_size_tab_mutation) { checkCuda(cudaFree(tab_mutation)); current_size_tab_mutation = total_nb_mutations_host * 1.1; checkCuda(cudaMalloc(&tab_mutation,current_size_tab_mutation* sizeof(TypeMutation))); } int min_genome_length_ = 10; int max_genome_length_ = 10000000; predict_size_v2<<<nb_indiv,1>>>(dna_size, next_gen_dna_size, dna_mutator_list, tab_mutation,nb_mutations,mutations_offset,gpu_counters,next_generation_reproducer, max_genome_length_,min_genome_length_,nb_indiv); cudaDeviceSynchronize(); // DO MUTATION std::vector <size_t> host_dna_size( nb_indiv); checkCuda(cudaMemcpy(host_dna_size.data(), next_gen_dna_size, nb_indiv * sizeof(size_t), cudaMemcpyDeviceToHost)); global_dna_size=0; for (int i = 0; i < nb_indiv; i++) { global_dna_size += host_dna_size[i]; host_max_dna_size = host_max_dna_size < host_dna_size[i] ? host_dna_size[i] : host_max_dna_size; } bool haveChange = false; if (global_dna_size >= allocated_global_dna_size) { haveChange = true; allocated_global_dna_size = global_dna_size*2; checkCuda(cudaMalloc((void **) &next_gen_dna, allocated_global_dna_size * sizeof(char))); checkCuda(cudaFree(dna_term)); checkCuda(cudaMalloc((void **) &dna_term, allocated_global_dna_size * sizeof(int8_t * ))); checkCuda(cudaFree(start_protein)); checkCuda(cudaMalloc((void **) &start_protein, allocated_global_dna_size * sizeof(int8_t * ))); } compute_next_gen_dna_offset<<<nb_indiv,128>>>(next_gen_dna_size, next_gen_dna_offset); x_dim_size = (host_max_dna_size / 128)+1; y_dim_size = nb_indiv; dim3 dimGrid2(x_dim_size,y_dim_size); do_mutation_v2<<<dimGrid2,128>>>(tab_mutation, nb_mutations, dna_size, dna_offset, dna, next_gen_dna, next_gen_dna_size, next_gen_dna_offset,next_generation_reproducer, mutations_offset,nb_mut_bp); //printf("DNA 1 %p\n",dna); //next_generation_dna_read<<<1,1>>>(next_gen_dna, next_gen_dna_offset,next_gen_dna_size, global_dna_size); // SWITCH STRUCTURE int block = ceil(nb_indiv/32); do_memset<<<block,32>>>(phenotype_activ,phenotype_inhib,nb_mutations,rna_idx,protein_idx,nb_proteins, nb_promoters,next_gen_dna_size, nb_indiv); //allocate_next_gen(nb_indiv); //printf("DNA 2 %p\n",dna); size_t* tmp_dna_size = dna_size; dna_size = next_gen_dna_size; next_gen_dna_size = tmp_dna_size; size_t* tmp_dna_offset = dna_offset; dna_offset = next_gen_dna_offset; next_gen_dna_offset = tmp_dna_offset; //global_dna_size = global_next_gen_dna_size; cudaDeviceSynchronize(); assert(dna!=0); //printf("DNA 3 %p\n",dna); if (haveChange) { checkCuda(cudaFree(dna)); checkCuda(cudaMalloc((void **) &dna, allocated_global_dna_size * sizeof(char))); } //printf("DNA 4 %p\n",dna); cudaDeviceSynchronize(); char* dna_tmp = dna; dna = next_gen_dna; next_gen_dna = dna_tmp; // clean(exp_m); } void allocate_next_gen(int nb_indiv) { for (int indiv_id = 0; indiv_id < nb_indiv; indiv_id++) { checkCuda(cudaMemset(host_phenotype[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(cudaMemset(host_phenotype_activ[indiv_id], 0.0, 300 * sizeof(double))); checkCuda(cudaMemset(host_phenotype_inhib[indiv_id], 0.0, 300 * sizeof(double))); } checkCuda(cudaMemset(nb_mutations, 0, (nb_indiv+1) * sizeof(int))); checkCuda(cudaMemset(mutations_offset, 0, nb_indiv * sizeof(int))); checkCuda(cudaMemset(mutations_idx, 0, nb_indiv * sizeof(int))); checkCuda(cudaMemset(rna_idx, 0, 
(nb_indiv+1) * sizeof(int32_t))); checkCuda(cudaMemset(rna_offset, 0, nb_indiv * sizeof(int32_t))); checkCuda(cudaMemset(protein_idx, 0, (nb_indiv+1) * sizeof(int32_t))); checkCuda(cudaMemset(protein_offset, 0, nb_indiv * sizeof(int32_t))); checkCuda(cudaMemset(nb_proteins, 0, (nb_indiv+1) * sizeof(int))); checkCuda(cudaMemset(nb_promoters, 0, (nb_indiv+1) * sizeof(int))); } __global__ void do_memset(double** phenotype_activ, double** phenotype_inhib, int* nb_mutations, int32_t* rna_idx, int32_t* protein_idx, int* nb_proteins, int* nb_promoters, size_t* dna_size, int nb_indiv) { const int indiv_id = blockIdx.x * blockDim.x + threadIdx.x; if (indiv_id < nb_indiv) { for (int i = 0; i < 300; i++) { phenotype_inhib[indiv_id][i] = 0; phenotype_activ[indiv_id][i] = 0; } rna_idx[indiv_id] = 0; protein_idx[indiv_id] = 0; nb_proteins[indiv_id] = 0; nb_promoters[indiv_id] = 0; if (indiv_id == 0) { nb_mutations[nb_indiv] = 0; rna_idx[nb_indiv] = 0; protein_idx[nb_indiv] = 0; nb_proteins[nb_indiv] = 0; nb_promoters[nb_indiv] = 0; } } }
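The step function above finishes each generation by reallocating the global DNA buffer only when the new total genome size exceeds the previous allocation, then swapping the current and next-generation pointers. A minimal standalone sketch of that grow-and-swap pattern, assuming hypothetical names (grow_and_swap, cur, next, capacity) that are not part of the file above:

#include <cuda_runtime.h>
#include <cstdio>

// Grow-and-swap double buffering: reallocate only when the requested size
// exceeds the current capacity, then exchange the roles of the two buffers.
static void grow_and_swap(char** cur, char** next, size_t needed, size_t* capacity) {
    if (needed > *capacity) {
        *capacity = needed * 2;               // over-allocate to amortize future growth
        cudaFree(*cur);                       // the "current" buffer is the one re-created
        cudaMalloc((void**)cur, *capacity);
    }
    char* tmp = *cur;                         // swap: last generation's output becomes input
    *cur = *next;
    *next = tmp;
}

int main() {
    char *cur = nullptr, *next = nullptr;
    size_t capacity = 1 << 20;
    cudaMalloc((void**)&cur, capacity);
    cudaMalloc((void**)&next, capacity);
    grow_and_swap(&cur, &next, 3u << 20, &capacity);   // forces one reallocation
    printf("capacity now %zu bytes\n", capacity);
    cudaFree(cur);
    cudaFree(next);
    return 0;
}

Doubling the capacity mirrors the allocated_global_dna_size = global_dna_size*2 policy above and keeps reallocations rare as genomes grow.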
17f80dcef9fd46b30dc3bde09c01809e6237f437.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once const int BLOCK_SIZE_X = 32; const int BLOCK_SIZE_Y = 32; __global__ void matrixTransposeKernelShM(const double* d_matrix_in, double* d_matrix_out, int N, int M ) { __shared__ int sh_matrix[BLOCK_SIZE_X*BLOCK_SIZE_Y]; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < N && y < M){ sh_matrix[threadIdx.x + threadIdx.y * BLOCK_SIZE_Y]= d_matrix_in[x*M + y]; __syncthreads(); d_matrix_out[y*N + x] = sh_matrix[threadIdx.x + threadIdx.y * BLOCK_SIZE_Y]; } } void cudaExpMatrix::transpose(){ double *d_matrix_out, *d_matrix_in; std::cout << "\nTransposing expression matrix..."; hipMalloc( &d_matrix_in , V*M * sizeof(double)); hipMalloc( &d_matrix_out, M*V * sizeof(double)); hipMemcpy( d_matrix_in, expMatrix.eMatrix, V*M * sizeof(double), hipMemcpyHostToDevice); hipError_t("Matrix Allocation Trasposition"); dim3 num_blocks(V/BLOCK_SIZE_X, V/BLOCK_SIZE_Y, 1); if (V%BLOCK_SIZE_X) { num_blocks.x++; num_blocks.y++; } dim3 block_size(BLOCK_SIZE_X , BLOCK_SIZE_Y, 1); hipLaunchKernelGGL(( matrixTransposeKernelShM), dim3(num_blocks), dim3(block_size) , 0, 0, d_matrix_in, d_matrix_out, V, M); hipError_t("Matrix Trasposition"); //double* h_matrix_tmp = new double[M*V]; hipMemcpy( expMatrix.eMatrix, d_matrix_out, M*V*sizeof(double), hipMemcpyDeviceToHost); hipError_t("Matrix Trasposition copy output"); hipFree( d_matrix_in ); hipFree( d_matrix_out ); std::cout << "Complete" << '\n'; //for (int i = 0 ; i < M ; ++i) { // for (int j = 0 ; j < V ; ++j) { // std::cout << h_matrix_tmp[i*V + j] << ' '; // } // std::cout << '\n'; //} }
17f80dcef9fd46b30dc3bde09c01809e6237f437.cu
#pragma once const int BLOCK_SIZE_X = 32; const int BLOCK_SIZE_Y = 32; __global__ void matrixTransposeKernelShM(const double* d_matrix_in, double* d_matrix_out, int N, int M ) { __shared__ int sh_matrix[BLOCK_SIZE_X*BLOCK_SIZE_Y]; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < N && y < M){ sh_matrix[threadIdx.x + threadIdx.y * BLOCK_SIZE_Y]= d_matrix_in[x*M + y]; __syncthreads(); d_matrix_out[y*N + x] = sh_matrix[threadIdx.x + threadIdx.y * BLOCK_SIZE_Y]; } } void cudaExpMatrix::transpose(){ double *d_matrix_out, *d_matrix_in; std::cout << "\nTransposing expression matrix..."; cudaMalloc( &d_matrix_in , V*M * sizeof(double)); cudaMalloc( &d_matrix_out, M*V * sizeof(double)); cudaMemcpy( d_matrix_in, expMatrix.eMatrix, V*M * sizeof(double), cudaMemcpyHostToDevice); cudaError("Matrix Allocation Trasposition"); dim3 num_blocks(V/BLOCK_SIZE_X, V/BLOCK_SIZE_Y, 1); if (V%BLOCK_SIZE_X) { num_blocks.x++; num_blocks.y++; } dim3 block_size(BLOCK_SIZE_X , BLOCK_SIZE_Y, 1); matrixTransposeKernelShM<<< num_blocks, block_size >>>(d_matrix_in, d_matrix_out, V, M); cudaError("Matrix Trasposition"); //double* h_matrix_tmp = new double[M*V]; cudaMemcpy( expMatrix.eMatrix, d_matrix_out, M*V*sizeof(double), cudaMemcpyDeviceToHost); cudaError("Matrix Trasposition copy output"); cudaFree( d_matrix_in ); cudaFree( d_matrix_out ); std::cout << "Complete" << '\n'; //for (int i = 0 ; i < M ; ++i) { // for (int j = 0 ; j < V ; ++j) { // std::cout << h_matrix_tmp[i*V + j] << ' '; // } // std::cout << '\n'; //} }
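The transpose pair above rounds its grid up by incrementing both block counts whenever V is not a multiple of the 32x32 block. A small self-contained driver, with illustrative names (transposeNaive, N, M, h_in) that are assumptions rather than code from the pair, showing the equivalent ceil-division launch and a host-side spot check:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void transposeNaive(const double* in, double* out, int N, int M) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // row index in the input
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // column index in the input
    if (x < N && y < M) out[y * N + x] = in[x * M + y];
}

int main() {
    const int N = 37, M = 53;                        // deliberately not multiples of 32
    double *h_in = new double[N * M], *h_out = new double[M * N];
    for (int i = 0; i < N * M; ++i) h_in[i] = i;
    double *d_in, *d_out;
    cudaMalloc(&d_in,  N * M * sizeof(double));
    cudaMalloc(&d_out, M * N * sizeof(double));
    cudaMemcpy(d_in, h_in, N * M * sizeof(double), cudaMemcpyHostToDevice);
    dim3 block(32, 32);
    dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y);  // ceil division
    transposeNaive<<<grid, block>>>(d_in, d_out, N, M);
    cudaMemcpy(h_out, d_out, M * N * sizeof(double), cudaMemcpyDeviceToHost);
    printf("out[3*N+2]=%g expected %g\n", h_out[3 * N + 2], h_in[2 * M + 3]);
    cudaFree(d_in); cudaFree(d_out); delete[] h_in; delete[] h_out;
    return 0;
}

The (dim + block - 1) / block expression covers partial tiles without the explicit modulus test used above.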
c7662b9727800f538de602b2b900c18d1942bc21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/cross_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/index_calculator.h" namespace phi { template <typename T> __global__ void CrossGrad(const T* x, const T* y, const T* out, T* out_dx, T* out_dy, const int stride, const int N, phi::funcs::IndexCalculator index_calculator) { CUDA_KERNEL_LOOP(i, N) { int offset = index_calculator(i); auto pos0 = offset + 0 * stride; auto pos1 = offset + 1 * stride; auto pos2 = offset + 2 * stride; using MPType = typename phi::dtype::MPTypeTrait<T>::Type; MPType x_pos0_mp = static_cast<MPType>(x[pos0]); MPType x_pos1_mp = static_cast<MPType>(x[pos1]); MPType x_pos2_mp = static_cast<MPType>(x[pos2]); MPType y_pos0_mp = static_cast<MPType>(y[pos0]); MPType y_pos1_mp = static_cast<MPType>(y[pos1]); MPType y_pos2_mp = static_cast<MPType>(y[pos2]); MPType out_pos0_mp = static_cast<MPType>(out[pos0]); MPType out_pos1_mp = static_cast<MPType>(out[pos1]); MPType out_pos2_mp = static_cast<MPType>(out[pos2]); out_dx[pos0] = static_cast<T>(out_pos2_mp * y_pos1_mp - out_pos1_mp * y_pos2_mp); out_dy[pos0] = static_cast<T>(out_pos1_mp * x_pos2_mp - out_pos2_mp * x_pos1_mp); out_dx[pos1] = static_cast<T>(out_pos0_mp * y_pos2_mp - out_pos2_mp * y_pos0_mp); out_dy[pos1] = static_cast<T>(out_pos2_mp * x_pos0_mp - out_pos0_mp * x_pos2_mp); out_dx[pos2] = static_cast<T>(out_pos1_mp * y_pos0_mp - out_pos0_mp * y_pos1_mp); out_dy[pos2] = static_cast<T>(out_pos0_mp * x_pos1_mp - out_pos1_mp * x_pos0_mp); } } template <typename T, typename Context> void CrossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& out_grad, int axis, DenseTensor* x_grad, DenseTensor* y_grad) { auto& input_x = x; auto& input_y = y; auto& input_out_grad = out_grad; auto* output_x_grad = x_grad; auto* output_y_grad = y_grad; int dim = axis; auto input_x_dims = input_x.dims(); if (dim != DDim::kMaxRank) { PADDLE_ENFORCE_EQ( dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()), true, errors::OutOfRange( "Attr(dim) is out of range, It's expected " "to be in range of [-%d, %d]. But received Attr(dim) = %d.", input_x_dims.size(), input_x_dims.size() - 1, dim)); if (dim < 0) { dim += input_x_dims.size(); } PADDLE_ENFORCE_EQ( input_x_dims[dim] == 3, true, errors::InvalidArgument( "Input(X/Y).dims[dim] must be equal to 3. 
But received: " "Input(X/Y).dims[dim] = [%d].", input_x_dims[dim])); } else { for (auto i = 0; i < input_x_dims.size(); i++) { if (input_x_dims[i] == 3) { dim = i; break; } } PADDLE_ENFORCE_EQ( dim == DDim::kMaxRank, false, errors::InvalidArgument("There must be at least one dimension 'd' " "so that Input(X/Y).dims()[d] is equal to 3. " "But received: Input(X/Y).dims() == [%s].", input_x_dims)); } std::vector<int> cal_dims; std::vector<int> left_strides; std::vector<int> full_strides; std::vector<int> merged_dims; for (int i = 0; i < dim; i++) { if (i == 0) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[0] *= input_x_dims[i]; } } int merge_axis = merged_dims.size(); merged_dims.push_back(input_x_dims[dim]); for (int i = dim + 1; i < input_x_dims.size(); i++) { if (i == dim + 1) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[merge_axis + 1] *= input_x_dims[i]; } } int full_dim = 1; for (int i = 0; i < merged_dims.size(); i++) { full_strides.insert(full_strides.begin(), full_dim); full_dim *= merged_dims[merged_dims.size() - i - 1]; if (i == merge_axis) { continue; } cal_dims.push_back(i); } int left_dim = 1; for (int i = merged_dims.size() - 1; i >= 0; i--) { if (i == merge_axis) { continue; } left_strides.insert(left_strides.begin(), left_dim); left_dim *= merged_dims[i]; } const auto* input_x_data = input_x.data<T>(); const auto* input_y_data = input_y.data<T>(); const auto* input_out_grad_data = input_out_grad.data<T>(); auto* output_x_grad_data = dev_ctx.template Alloc<T>(x_grad); auto* output_y_grad_data = dev_ctx.template Alloc<T>(y_grad); auto index_calculator = phi::funcs::IndexCalculator( merged_dims.size() - 1, cal_dims, left_strides, full_strides); int64_t numel = x.numel(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3); hipLaunchKernelGGL(( CrossGrad), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), input_x_data, input_y_data, input_out_grad_data, output_x_grad_data, output_y_grad_data, full_strides[merge_axis], numel / 3, index_calculator); } } // namespace phi PD_REGISTER_KERNEL(cross_grad, GPU, ALL_LAYOUT, phi::CrossGradKernel, phi::dtype::float16, phi::dtype::bfloat16, float, double, int, int64_t) {}
c7662b9727800f538de602b2b900c18d1942bc21.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/cross_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/index_calculator.h" namespace phi { template <typename T> __global__ void CrossGrad(const T* x, const T* y, const T* out, T* out_dx, T* out_dy, const int stride, const int N, phi::funcs::IndexCalculator index_calculator) { CUDA_KERNEL_LOOP(i, N) { int offset = index_calculator(i); auto pos0 = offset + 0 * stride; auto pos1 = offset + 1 * stride; auto pos2 = offset + 2 * stride; using MPType = typename phi::dtype::MPTypeTrait<T>::Type; MPType x_pos0_mp = static_cast<MPType>(x[pos0]); MPType x_pos1_mp = static_cast<MPType>(x[pos1]); MPType x_pos2_mp = static_cast<MPType>(x[pos2]); MPType y_pos0_mp = static_cast<MPType>(y[pos0]); MPType y_pos1_mp = static_cast<MPType>(y[pos1]); MPType y_pos2_mp = static_cast<MPType>(y[pos2]); MPType out_pos0_mp = static_cast<MPType>(out[pos0]); MPType out_pos1_mp = static_cast<MPType>(out[pos1]); MPType out_pos2_mp = static_cast<MPType>(out[pos2]); out_dx[pos0] = static_cast<T>(out_pos2_mp * y_pos1_mp - out_pos1_mp * y_pos2_mp); out_dy[pos0] = static_cast<T>(out_pos1_mp * x_pos2_mp - out_pos2_mp * x_pos1_mp); out_dx[pos1] = static_cast<T>(out_pos0_mp * y_pos2_mp - out_pos2_mp * y_pos0_mp); out_dy[pos1] = static_cast<T>(out_pos2_mp * x_pos0_mp - out_pos0_mp * x_pos2_mp); out_dx[pos2] = static_cast<T>(out_pos1_mp * y_pos0_mp - out_pos0_mp * y_pos1_mp); out_dy[pos2] = static_cast<T>(out_pos0_mp * x_pos1_mp - out_pos1_mp * x_pos0_mp); } } template <typename T, typename Context> void CrossGradKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& out_grad, int axis, DenseTensor* x_grad, DenseTensor* y_grad) { auto& input_x = x; auto& input_y = y; auto& input_out_grad = out_grad; auto* output_x_grad = x_grad; auto* output_y_grad = y_grad; int dim = axis; auto input_x_dims = input_x.dims(); if (dim != DDim::kMaxRank) { PADDLE_ENFORCE_EQ( dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()), true, errors::OutOfRange( "Attr(dim) is out of range, It's expected " "to be in range of [-%d, %d]. But received Attr(dim) = %d.", input_x_dims.size(), input_x_dims.size() - 1, dim)); if (dim < 0) { dim += input_x_dims.size(); } PADDLE_ENFORCE_EQ( input_x_dims[dim] == 3, true, errors::InvalidArgument( "Input(X/Y).dims[dim] must be equal to 3. But received: " "Input(X/Y).dims[dim] = [%d].", input_x_dims[dim])); } else { for (auto i = 0; i < input_x_dims.size(); i++) { if (input_x_dims[i] == 3) { dim = i; break; } } PADDLE_ENFORCE_EQ( dim == DDim::kMaxRank, false, errors::InvalidArgument("There must be at least one dimension 'd' " "so that Input(X/Y).dims()[d] is equal to 3. 
" "But received: Input(X/Y).dims() == [%s].", input_x_dims)); } std::vector<int> cal_dims; std::vector<int> left_strides; std::vector<int> full_strides; std::vector<int> merged_dims; for (int i = 0; i < dim; i++) { if (i == 0) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[0] *= input_x_dims[i]; } } int merge_axis = merged_dims.size(); merged_dims.push_back(input_x_dims[dim]); for (int i = dim + 1; i < input_x_dims.size(); i++) { if (i == dim + 1) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[merge_axis + 1] *= input_x_dims[i]; } } int full_dim = 1; for (int i = 0; i < merged_dims.size(); i++) { full_strides.insert(full_strides.begin(), full_dim); full_dim *= merged_dims[merged_dims.size() - i - 1]; if (i == merge_axis) { continue; } cal_dims.push_back(i); } int left_dim = 1; for (int i = merged_dims.size() - 1; i >= 0; i--) { if (i == merge_axis) { continue; } left_strides.insert(left_strides.begin(), left_dim); left_dim *= merged_dims[i]; } const auto* input_x_data = input_x.data<T>(); const auto* input_y_data = input_y.data<T>(); const auto* input_out_grad_data = input_out_grad.data<T>(); auto* output_x_grad_data = dev_ctx.template Alloc<T>(x_grad); auto* output_y_grad_data = dev_ctx.template Alloc<T>(y_grad); auto index_calculator = phi::funcs::IndexCalculator( merged_dims.size() - 1, cal_dims, left_strides, full_strides); int64_t numel = x.numel(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3); CrossGrad<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(input_x_data, input_y_data, input_out_grad_data, output_x_grad_data, output_y_grad_data, full_strides[merge_axis], numel / 3, index_calculator); } } // namespace phi PD_REGISTER_KERNEL(cross_grad, GPU, ALL_LAYOUT, phi::CrossGradKernel, phi::dtype::float16, phi::dtype::bfloat16, float, double, int, int64_t) {}
2439ec04111810e757c1b93a27af434c4dfe163a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c @author Ichitaro Yamazaki */ #include "magma_internal.h" #define NB 64 #define A(i,j) (A[(i) + (j)*lda]) #define W(i,j) (W[(i) + (j)*ldw]) // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void zlascl_2x2_lower( int m, const magmaDoubleComplex* W, int ldw, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; magmaDoubleComplex D21 = W( 1, 0 ); magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 ); magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CONJ( D21 ) ); double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = MAGMA_Z_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) ); A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) ); } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void zlascl_2x2_upper( int m, const magmaDoubleComplex *W, int ldw, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; magmaDoubleComplex D21 = W( m, 1 ); magmaDoubleComplex D11 = MAGMA_Z_DIV( W( m+1, 1 ), MAGMA_Z_CONJ( D21 ) ); magmaDoubleComplex D22 = MAGMA_Z_DIV( W( m, 0 ), D21 ); double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) ); A( ind, 1 ) = MAGMA_Z_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) ); } } /***************************************************************************//** Purpose ------- ZLASCL_2x2 scales the M by M complex matrix A by the 2-by-2 pivot. TYPE specifies that A may be upper or lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] dW DOUBLE PRECISION vector, dimension (2*lddw) The matrix containing the 2-by-2 pivot. @param[in] lddw INTEGER The leading dimension of the array W. LDDA >= max(1,M). @param[in,out] dA COMPLEX*16 array, dimension (LDDA,N) The matrix to be scaled by dW. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lascl_2x2 *******************************************************************************/ extern "C" void magmablas_zlascl_2x2( magma_type_t type, magma_int_t m, magmaDoubleComplex_const_ptr dW, magma_int_t lddw, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper ) *info = -1; else if ( m < 0 ) *info = -2; else if ( ldda < max(1,m) ) *info = -4; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if (type == MagmaLower) { hipLaunchKernelGGL(( zlascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda); } else { hipLaunchKernelGGL(( zlascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda); } }
2439ec04111810e757c1b93a27af434c4dfe163a.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c @author Ichitaro Yamazaki */ #include "magma_internal.h" #define NB 64 #define A(i,j) (A[(i) + (j)*lda]) #define W(i,j) (W[(i) + (j)*ldw]) // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void zlascl_2x2_lower( int m, const magmaDoubleComplex* W, int ldw, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; magmaDoubleComplex D21 = W( 1, 0 ); magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 ); magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CONJ( D21 ) ); double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = MAGMA_Z_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) ); A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) ); } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void zlascl_2x2_upper( int m, const magmaDoubleComplex *W, int ldw, magmaDoubleComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; magmaDoubleComplex D21 = W( m, 1 ); magmaDoubleComplex D11 = MAGMA_Z_DIV( W( m+1, 1 ), MAGMA_Z_CONJ( D21 ) ); magmaDoubleComplex D22 = MAGMA_Z_DIV( W( m, 0 ), D21 ); double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) ); A( ind, 1 ) = MAGMA_Z_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) ); } } /***************************************************************************//** Purpose ------- ZLASCL_2x2 scales the M by M complex matrix A by the 2-by-2 pivot. TYPE specifies that A may be upper or lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] dW DOUBLE PRECISION vector, dimension (2*lddw) The matrix containing the 2-by-2 pivot. @param[in] lddw INTEGER The leading dimension of the array W. LDDA >= max(1,M). @param[in,out] dA COMPLEX*16 array, dimension (LDDA,N) The matrix to be scaled by dW. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lascl_2x2 *******************************************************************************/ extern "C" void magmablas_zlascl_2x2( magma_type_t type, magma_int_t m, magmaDoubleComplex_const_ptr dW, magma_int_t lddw, magmaDoubleComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper ) *info = -1; else if ( m < 0 ) *info = -2; else if ( ldda < max(1,m) ) *info = -4; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if (type == MagmaLower) { zlascl_2x2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda); } else { zlascl_2x2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda); } }
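Both zlascl_2x2 kernels above address elements through the A(i,j) and W(i,j) macros, which assume column-major storage with a leading dimension. A short sketch of that indexing rule, using an illustrative helper idx that is not part of MAGMA:

#include <cstdio>

// Column-major indexing behind the A(i,j)/W(i,j) macros above:
// element (i,j) of a matrix with leading dimension ld lives at offset i + j*ld.
static inline int idx(int i, int j, int ld) { return i + j * ld; }

int main() {
    const int ld = 4;                 // leading dimension (rows allocated per column)
    double A[ld * 2] = {0};           // a 4 x 2 column-major block
    A[idx(2, 1, ld)] = 7.0;           // write element (row 2, col 1)
    printf("A(2,1) stored at offset %d, value %g\n", idx(2, 1, ld), A[idx(2, 1, ld)]);
    return 0;
}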
1353255d590e823c7e06e622c5f3ad36cc40e6b4.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <assert.h> using namespace at; __device__ void test(){ // test half construction and implicit conversions in device assert(Half(3) == Half(3.0f)); assert(static_cast<Half>(3.0f) == Half(3.0f)); // there is no float <=> __half implicit conversion assert(static_cast<Half>(3.0f) == 3.0f); __half a = __float2half(3.0f); __half b = __float2half(2.0f); __half c = a - Half(b); assert(static_cast<Half>(c) == Half(1.0)); // asserting if the functions used on // half types give almost equivalent results when using // functions on double. // The purpose of these asserts are to test the device side // half API for the common mathematical functions. // Note: When calling std math functions from device, don't // use the std namespace, but just "::" so that the function // gets resolved from nvcc math_functions.hpp float threshold = 0.00001; assert(::abs(::lgamma(Half(10.0)) - ::lgamma(10.0f)) <= threshold); assert(::abs(::exp(Half(1.0)) - ::exp(1.0f)) <= threshold); assert(::abs(::log(Half(1.0)) - ::log(1.0f)) <= threshold); assert(::abs(::log10(Half(1000.0)) - ::log10(1000.0f)) <= threshold); assert(::abs(::log1p(Half(0.0)) - ::log1p(0.0f)) <= threshold); assert(::abs(::log2(Half(1000.0)) - ::log2(1000.0f)) <= threshold); assert(::abs(::expm1(Half(1.0)) - ::expm1(1.0f)) <= threshold); assert(::abs(::cos(Half(0.0)) - ::cos(0.0f)) <= threshold); assert(::abs(::sin(Half(0.0)) - ::sin(0.0f)) <= threshold); assert(::abs(::sqrt(Half(100.0)) - ::sqrt(100.0f)) <= threshold); assert(::abs(::ceil(Half(2.4)) - ::ceil(2.4f)) <= threshold); assert(::abs(::floor(Half(2.7)) - ::floor(2.7f)) <= threshold); assert(::abs(::trunc(Half(2.7)) - ::trunc(2.7f)) <= threshold); assert(::abs(::acos(Half(-1.0)) - ::acos(-1.0f)) <= threshold); assert(::abs(::cosh(Half(1.0)) - ::cosh(1.0f)) <= threshold); assert(::abs(::acosh(Half(1.0)) - ::acosh(1.0f)) <= threshold); assert(::abs(::asin(Half(1.0)) - ::asin(1.0f)) <= threshold); assert(::abs(::sinh(Half(1.0)) - ::sinh(1.0f)) <= threshold); assert(::abs(::asinh(Half(1.0)) - ::asinh(1.0f)) <= threshold); assert(::abs(::tan(Half(0.0)) - ::tan(0.0f)) <= threshold); assert(::abs(::atan(Half(1.0)) - ::atan(1.0f)) <= threshold); assert(::abs(::tanh(Half(1.0)) - ::tanh(1.0f)) <= threshold); assert(::abs(::erf(Half(10.0)) - ::erf(10.0f)) <= threshold); assert(::abs(::erfc(Half(10.0)) - ::erfc(10.0f)) <= threshold); assert(::abs(::abs(Half(-3.0)) - ::abs(-3.0f)) <= threshold); assert(::abs(::round(Half(2.3)) - ::round(2.3f)) <= threshold); assert(::abs(::pow(Half(2.0), Half(10.0)) - ::pow(2.0f, 10.0f)) <= threshold); assert( ::abs(::atan2(Half(7.0), Half(0.0)) - ::atan2(7.0f, 0.0f)) <= threshold); // note: can't use namespace on isnan and isinf in device code #ifdef _MSC_VER // Windows requires this explicit conversion. 
The reason is unclear // related issue with clang: https://reviews.llvm.org/D37906 assert(::abs(::isnan((float)Half(0.0)) - ::isnan(0.0f)) <= threshold); assert(::abs(::isinf((float)Half(0.0)) - ::isinf(0.0f)) <= threshold); #else assert(::abs(::isnan(Half(0.0)) - ::isnan(0.0f)) <= threshold); assert(::abs(::isinf(Half(0.0)) - ::isinf(0.0f)) <= threshold); #endif } __global__ void kernel(){ test(); } void launch_function(){ hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, ); } // half common math functions tests in device TEST(HalfCuda, HalfCuda) { if (!at::cuda::is_available()) return; launch_function(); hipError_t err = hipDeviceSynchronize(); bool isEQ = err == hipSuccess; ASSERT_TRUE(isEQ); }
1353255d590e823c7e06e622c5f3ad36cc40e6b4.cu
#include <gtest/gtest.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <assert.h> using namespace at; __device__ void test(){ // test half construction and implicit conversions in device assert(Half(3) == Half(3.0f)); assert(static_cast<Half>(3.0f) == Half(3.0f)); // there is no float <=> __half implicit conversion assert(static_cast<Half>(3.0f) == 3.0f); __half a = __float2half(3.0f); __half b = __float2half(2.0f); __half c = a - Half(b); assert(static_cast<Half>(c) == Half(1.0)); // asserting if the functions used on // half types give almost equivalent results when using // functions on double. // The purpose of these asserts are to test the device side // half API for the common mathematical functions. // Note: When calling std math functions from device, don't // use the std namespace, but just "::" so that the function // gets resolved from nvcc math_functions.hpp float threshold = 0.00001; assert(::abs(::lgamma(Half(10.0)) - ::lgamma(10.0f)) <= threshold); assert(::abs(::exp(Half(1.0)) - ::exp(1.0f)) <= threshold); assert(::abs(::log(Half(1.0)) - ::log(1.0f)) <= threshold); assert(::abs(::log10(Half(1000.0)) - ::log10(1000.0f)) <= threshold); assert(::abs(::log1p(Half(0.0)) - ::log1p(0.0f)) <= threshold); assert(::abs(::log2(Half(1000.0)) - ::log2(1000.0f)) <= threshold); assert(::abs(::expm1(Half(1.0)) - ::expm1(1.0f)) <= threshold); assert(::abs(::cos(Half(0.0)) - ::cos(0.0f)) <= threshold); assert(::abs(::sin(Half(0.0)) - ::sin(0.0f)) <= threshold); assert(::abs(::sqrt(Half(100.0)) - ::sqrt(100.0f)) <= threshold); assert(::abs(::ceil(Half(2.4)) - ::ceil(2.4f)) <= threshold); assert(::abs(::floor(Half(2.7)) - ::floor(2.7f)) <= threshold); assert(::abs(::trunc(Half(2.7)) - ::trunc(2.7f)) <= threshold); assert(::abs(::acos(Half(-1.0)) - ::acos(-1.0f)) <= threshold); assert(::abs(::cosh(Half(1.0)) - ::cosh(1.0f)) <= threshold); assert(::abs(::acosh(Half(1.0)) - ::acosh(1.0f)) <= threshold); assert(::abs(::asin(Half(1.0)) - ::asin(1.0f)) <= threshold); assert(::abs(::sinh(Half(1.0)) - ::sinh(1.0f)) <= threshold); assert(::abs(::asinh(Half(1.0)) - ::asinh(1.0f)) <= threshold); assert(::abs(::tan(Half(0.0)) - ::tan(0.0f)) <= threshold); assert(::abs(::atan(Half(1.0)) - ::atan(1.0f)) <= threshold); assert(::abs(::tanh(Half(1.0)) - ::tanh(1.0f)) <= threshold); assert(::abs(::erf(Half(10.0)) - ::erf(10.0f)) <= threshold); assert(::abs(::erfc(Half(10.0)) - ::erfc(10.0f)) <= threshold); assert(::abs(::abs(Half(-3.0)) - ::abs(-3.0f)) <= threshold); assert(::abs(::round(Half(2.3)) - ::round(2.3f)) <= threshold); assert(::abs(::pow(Half(2.0), Half(10.0)) - ::pow(2.0f, 10.0f)) <= threshold); assert( ::abs(::atan2(Half(7.0), Half(0.0)) - ::atan2(7.0f, 0.0f)) <= threshold); // note: can't use namespace on isnan and isinf in device code #ifdef _MSC_VER // Windows requires this explicit conversion. 
The reason is unclear // related issue with clang: https://reviews.llvm.org/D37906 assert(::abs(::isnan((float)Half(0.0)) - ::isnan(0.0f)) <= threshold); assert(::abs(::isinf((float)Half(0.0)) - ::isinf(0.0f)) <= threshold); #else assert(::abs(::isnan(Half(0.0)) - ::isnan(0.0f)) <= threshold); assert(::abs(::isinf(Half(0.0)) - ::isinf(0.0f)) <= threshold); #endif } __global__ void kernel(){ test(); } void launch_function(){ kernel<<<1, 1>>>(); } // half common math functions tests in device TEST(HalfCuda, HalfCuda) { if (!at::cuda::is_available()) return; launch_function(); cudaError_t err = cudaDeviceSynchronize(); bool isEQ = err == cudaSuccess; ASSERT_TRUE(isEQ); }
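The half-precision test above relies on device-side assert failures being surfaced by the cudaDeviceSynchronize call that follows the launch. A minimal sketch of that launch-then-synchronize checking pattern without the gtest and ATen dependencies; the kernel name check_something is illustrative:

#include <cuda_runtime.h>
#include <cassert>
#include <cstdio>

__global__ void check_something() {
    // A failing device-side assert is reported as an error at the next sync.
    assert(1 + 1 == 2);
}

int main() {
    check_something<<<1, 1>>>();
    cudaError_t err = cudaDeviceSynchronize();   // device asserts surface here
    printf("kernel status: %s\n", cudaGetErrorString(err));
    return err == cudaSuccess ? 0 : 1;
}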
17ac95e1fdaf09c02e4416d8b7d69ea558986c62.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@finding matrix dimension and matching with thread and matrix element int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = blockIdx.y * TILE_WIDTH + threadIdx.y; int Col = blockIdx.x * TILE_WIDTH + threadIdx.x; float Pvalue = 0; //@@declaring shared memory __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; for (int p = 0; p < ((numAColumns - 1) / TILE_WIDTH + 1); ++p) { if (Row < numARows && p * TILE_WIDTH + tx < numAColumns) { ds_M[ty][tx] = A[Row * numAColumns + p * TILE_WIDTH + tx]; } else { ds_M[ty][tx] = 0.0; } if (p * TILE_WIDTH + ty < numBRows && Col < numBColumns){ ds_N[ty][tx] = B[(p * TILE_WIDTH + ty) * numBColumns + Col]; } else { ds_N[ty][tx] = 0.0; } __syncthreads(); //@@to keep track the shared memory as if all threads can be done at same time if (Row < numCRows && Col < numCColumns){ for (int i = 0; i < TILE_WIDTH; ++i) { Pvalue += ds_M[ty][i] * ds_N[i][tx]; } __syncthreads(); } } if (Row < numCRows && Col < numCColumns) { C[Row * numCColumns + Col] = Pvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C int numCColumns; // number of columns in the matrix C int sizeA, sizeB, sizeC; hostC = NULL; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; sizeA = numARows * numAColumns * sizeof(float); sizeB = numBRows * numBColumns * sizeof(float); sizeC = numCRows * numCColumns * sizeof(float); //@@ Allocate the hostC matrix hostC = (float*) malloc(sizeC); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory wbCheck(hipMalloc((void**) &deviceA, sizeA)); wbCheck(hipMalloc((void**) &deviceB, sizeB)); wbCheck(hipMalloc((void**) &deviceC, sizeC)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU wbCheck(hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice)); wbCheck(hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid(numBColumns / 
TILE_WIDTH, numARows / TILE_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel hipLaunchKernelGGL(( matrixMultiplyShared), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); wbCheck(hipDeviceSynchronize()); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU wbCheck(hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory wbCheck(hipFree(deviceA)); wbCheck(hipFree(deviceB)); wbCheck(hipFree(deviceC)); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
17ac95e1fdaf09c02e4416d8b7d69ea558986c62.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@finding matrix dimension and matching with thread and matrix element int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = blockIdx.y * TILE_WIDTH + threadIdx.y; int Col = blockIdx.x * TILE_WIDTH + threadIdx.x; float Pvalue = 0; //@@declaring shared memory __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; for (int p = 0; p < ((numAColumns - 1) / TILE_WIDTH + 1); ++p) { if (Row < numARows && p * TILE_WIDTH + tx < numAColumns) { ds_M[ty][tx] = A[Row * numAColumns + p * TILE_WIDTH + tx]; } else { ds_M[ty][tx] = 0.0; } if (p * TILE_WIDTH + ty < numBRows && Col < numBColumns){ ds_N[ty][tx] = B[(p * TILE_WIDTH + ty) * numBColumns + Col]; } else { ds_N[ty][tx] = 0.0; } __syncthreads(); //@@to keep track the shared memory as if all threads can be done at same time if (Row < numCRows && Col < numCColumns){ for (int i = 0; i < TILE_WIDTH; ++i) { Pvalue += ds_M[ty][i] * ds_N[i][tx]; } __syncthreads(); } } if (Row < numCRows && Col < numCColumns) { C[Row * numCColumns + Col] = Pvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C int numCColumns; // number of columns in the matrix C int sizeA, sizeB, sizeC; hostC = NULL; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; sizeA = numARows * numAColumns * sizeof(float); sizeB = numBRows * numBColumns * sizeof(float); sizeC = numCRows * numCColumns * sizeof(float); //@@ Allocate the hostC matrix hostC = (float*) malloc(sizeC); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory wbCheck(cudaMalloc((void**) &deviceA, sizeA)); wbCheck(cudaMalloc((void**) &deviceB, sizeB)); wbCheck(cudaMalloc((void**) &deviceC, sizeC)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU wbCheck(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid(numBColumns / TILE_WIDTH, numARows / TILE_WIDTH, 1); wbTime_start(Compute, "Performing CUDA 
computation"); //@@ Launch the GPU Kernel matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); wbCheck(cudaDeviceSynchronize()); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU wbCheck(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory wbCheck(cudaFree(deviceA)); wbCheck(cudaFree(deviceB)); wbCheck(cudaFree(deviceC)); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
d9ed1c4198565d3f3caf859fb3cdf0cdff5f40a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"Header.h" //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ // This.cu file contains the implementation of core CUDA kernels required to implement a deep feed - forward convolutional network. //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ template<int> __global__ void MaxPoolingForward(float *s, float *c, int *Indx, int CRC, int SRC, int Src1, int Src2, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass for maxpooling. The input channel size is CRCxCRC, the pooling size is Src1xSrc1, the pooling stride is Src2xSrc2, and the output channel size is SRCxSRC where SRC = CRC/Src2. The index of the maximum output in each pooling square is stored in the Indx matrix to speed up the backward pass through the maxpooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. Src1:- Pooling size is Src1Src1. Src2:- Pooling stride. CRC:- Input channel size is CRCCRC (output channel size of previous stage before applying maxpooling). SRC:- Output channel size is SRCSRC after applying pooling. c:- Input buffer that conatians all input channels (output channels of the previous layer). Indx:- Output buffer to store the positon of the maximum value in each pooling square to be used by MaxPoolingBackward in the backward pass. s:- Output buffer where this cuda kernel stores the all output channels after applying maximum pooling. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; int SN = SRC*SRC; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int icx = Src1 * (iss % SRC); int icy = Src1 * (iss / SRC); int is = iss + n*SN; int ic = (n*CRC + icy)*CRC + icx; float max = -9.9e+30f; int index; for (int i = -1; i < Src2 - 1; i++) for (int j = -1; j < Src2 - 1; j++) { int j1 = icx + j, i1 = icy + i, ix = ic + i*CRC + j; if (j1 >= 0 && i1 >= 0 && j1 < CRC && i1 < CRC && c[ix] > max) { index = ix; max = c[index]; } } s[is] = max; Indx[is] = index; } } //============================================================================================================================================================ template<int> __global__ void MaxPoolingBackward(float *c, float *s, int *Indx, int SN, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass for maxpooling. The only job of this cuda function is to propagate back the error signal through the maxpooling stage, there is no parameter update required because the maxpooling stage has no trainable parameters. The error signal will only be passed to the location of the maximum value in each pooling square using the Indx matrix which stores those maximum values. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. SN:- The channel size on the output side of the mapooling stage, SN = SRCSRC. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the maxpooling stage. Indx:- Input buffer where the positon of the maximum value in each pooling square was stored in the forward pass by MaxPoolingForward. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the maxpooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int is = n*SN + iss; atomicAdd(c + Indx[is], s[is]); } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingForward(float *S, float *C, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass of the global average pooling stage used after the last convolutional layer. The input channel size is ChSize which is reduced to a single value that is equal to the average of all values in the channel. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. c:- Input buffer that conatians all input channels (output channels of the previous layer). s:- Output buffer where this cuda kernel stores the all output channels after applying global average pooling. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; float sum = 0.0f; while (ig1 < (n + 1)*ChSize) { sum += C[ig1]; ig1 += BLOCKSIZE; } s[is] = sum; __syncthreads(); int i = blockDim.x / 2; while (i > 0 && is + i < ChSize) { if (is < i) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) S[ig2] = s[0] / ChSize; } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingBackward(float *C, float *S, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass of the global average pooling stage. This stage has no trainable parameters, and therefore this function only propagates back the error signal through the average pooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the average pooling stage. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the average pooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_temp; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; if (is == 0)s_temp = S[ig2]; __syncthreads(); float temp = s_temp / ChSize; while (ig1 < (n + 1)*ChSize) { C[ig1] = temp; ig1 += BLOCKSIZE; } } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void Softmax(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements both the forward and backward passes of the softmax function. Because the softmax stage is the last stage in the network, doing the backward propagation of the error signal immediately after doing the forward pass in the same cuda function is more efficient and adds minimal cost. The implementation is slightly complex because it takes into consideration the possibility of overflow, and the thread block size limitation to 1024 cuda threads per block. */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. 
mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the softmax function are stored and where this kernel stores the error signal at the input side of the softmax stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; float a = 1.0; while (ig < (n + 1)*SIZE) { if (max > 700){ a = (700 / max); tempy[k] *= a; } tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = a*temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void SoftmaxInference(float *ys, float *y, int *t, float *mse) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the softmax function used in the inference stage. This function is similar in structure to the Softmax cuda function used in the training phase. However, rather than propagating back the error signal, this function calculates and stores the whole output label for each test image in Ys which then can be analyzed to calculate the mean square error, the confusion matrix, and the classification rate of the test set. */ /**** Argument List ****/ /* t:- input buffer that contains the image labels. mse:- output variable to accumulate the mean square error of the test set. y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0){ s_t = t[n]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; max = fmaxf(max, tempy[k]); k++; ig += BLOCKSIZE; } s[is] = max; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { s[is] = fmaxf(s[is], s[is + i]); } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; while (ig < (n + 1)*SIZE) { if (max > 700)tempy[k] *= 700 / max; tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); ys[ig] += temp; if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void SoftmaxInference2(float *ys, float *y, int *t, int *Indx, float *mse) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is similar to the SoftmaxInference() kernel with the addition of the Indx buffer which is generated earlier by the ReshuffleImagesMT() function to store the indices of batches of images from all tasks in a round robin order. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int s_t, s_indx1; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_indx1 = indx1; s_t = t[indx1]; } __syncthreads(); int ig2 = s_indx1*SIZE + is; float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; max = fmaxf(max, tempy[k]); k++; ig += BLOCKSIZE; } s[is] = max; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { s[is] = fmaxf(s[is], s[is + i]); } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; while (ig < (n + 1)*SIZE) { if (max > 700)tempy[k] *= 700 / max; tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); ys[ig2] += temp; if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; mse1 += temp * temp; k++; ig += BLOCKSIZE; ig2 += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoid(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward and backward passes of the log sigmoid function 1/(1+exp(-x)). */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the log sigmoid function are stored and where this kernel stores the error signal at the input side of the softmax stage. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-tempy[k]))); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoidInference(float *ys, float *y) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the log sigmoid function 1/(1+exp(-x)) used in the Inference stage. It stores the whole output label of each test image in ys. */ /**** Argument List ****/ /* y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-y[ig]))); ys[ig] += temp; ig += BLOCKSIZE; } } //============================================================================================================================================================ template < int> __global__ void Add_Mtx(float *c, float *a, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is an auxiliary cuda function that adds two GPU matrices c = c + a. */ /**** Argument List ****/ /* a:- input matrix. c:- input/output matrix to store c = c + a.. SIZE:- size of the input/output matrices. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] += a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_SGD_WDecay(float *c, float *a, float lr, float lmda, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the derivatives in matrix dw. The update equation implements steepest gradient descent with L2 regularization (weight decay). */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. a:- input buffer that contains the derivatives (dw). c:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] = (1 - lr*lmda)*c[i] - lr * a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop1(float *w, float *v, float *dw, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the current derivatives in matrix dw and the running averages of the derivatives in matrix v. The update equation implements Root Mean Square Propagation (RMSprop) with L2 regularization (weight decay). The initialization of the running average of the derivatives based on the time step (iteration number) is borrowed from the Adam algorithm. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dw:- input buffer that contains the derivatives. v:- input/output buffer that maintains the running average of the squared derivative per parameter. w:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. 
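The per-parameter arithmetic applied by this kernel reduces to the following scalar sketch (the helper name is illustrative; gamma and the small epsilon match the constants in the kernel body):

       void rmspropStep(float &w, float &v, float dw, float lr, float lmda, int iter)
       {
           const float gamma = 0.999f;
           v = gamma * v + (1.0f - gamma) * dw * dw;              // running average of the squared derivative
           float m_hat = v / (1.0f - powf(gamma, (float)iter));   // Adam-style bias correction of the average
           w = (1.0f - lr * lmda) * w - lr * dw / (sqrtf(m_hat) + 0.00000001f);   // weight decay plus RMSprop step
       }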
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { float gamma = 0.999f; v[i] = gamma*v[i] + (1 - gamma)*dw[i] * dw[i]; float m = v[i] / (1 - powf(gamma, float(iter))); w[i] = (1 - lr*lmda)*w[i] - lr* dw[i] / (sqrtf(m) + 0.00000001); i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop2(float *A, float *V, float *DA, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* Update_RMSprop2 is similar to Update_RMSprop1, but it is used for smaller matrices. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dA:- input buffer that contains the derivatives. V:- input/output buffer that maintains the running average of the squared derivative per parameter. A:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < SIZE) { float gamma = 0.999f; V[i] = gamma*V[i] + (1 - gamma)*DA[i] * DA[i]; float m = V[i] / (1 - powf(gamma, float(iter))); A[i] = (1 - lr*lmda)*A[i] - lr* DA[i] / (sqrtf(m) + 0.00000001); } } //============================================================================================================================================================ template<int> __global__ void DataAugmentation(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, unsigned int *Crop, float *RGB, float *Cropf) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations which is applied at the start of each training iteration. The augmentation is applied to a batch of images where the Red matrix stores the Red channels for all images, Green matrix stores the Green channels for all images, and the Blue matrix stores the Blue channels for all images. The Height and Width matrices store the height and width of each image in the batch, and the Start matrix stores the starting address of each image in the Red, Green, and Blue buffers. The Indx matrix stores the indices of the images in the current batch selected by the reshuffle algorithm. This function crops a random rectangular with size randomly selected to be between 8% and 100% of the image size and with aspect ratio randomly selected to be between 3/4 and 4/3. Then the cropped rectangular is fitted to the square window size of the network. Random horizontal flipping and color augmentation is also added. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. 
Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. Crop:- input buffer that contains integer random values used to choose the cropping position for each image, and decide on horizontal flipping. RGB:- input buffer that contains 3 random values per image each added to one of the RGB channels for colour augmentation. Cropf:- input buffer that contains 2 random values per image, one decides the amount of scaling, and the other decides the amount of change to the aspect ratio. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_crop[3], s_height, s_width, s_indx; __shared__ float s_RGB[3], s_cropf[2]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) { s_crop[is] = Crop[3 * n + is]; s_RGB[is] = RGB[3 * n + is]; if (is < 2) s_cropf[is] = Cropf[2 * n + is]; } if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hc, Wc; size_t start = s_start; float a = 0.08f + s_cropf[0] * (1.0f - 0.08f); //float a = 0.1914 + s_cropf[0] * (0.765625 - 0.1914);// float minHW = fminf(float(H), float(W)); float smax = fminf(1.3333333f, (W*W) / (minHW*minHW*a)); float smin = fmaxf(0.75f, (minHW*minHW*a) / (H*H)); float s = smin + s_cropf[1] * (smax - smin); Wc = int(minHW*sqrtf(a*s)); Hc = int(minHW*sqrtf(a / s)); float ScaleH = float(IR1 - 1) / float(Hc - 1); float ScaleW = float(IR1 - 1) / float(Wc - 1); int xd = s_crop[0] % (H + 1 - Hc); int yd = s_crop[1] % (W + 1 - Wc); int flip = s_crop[2] % 10; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = float(ix) / ScaleH + float(xd); if (flip < 5) iys = float(iy) / ScaleW + float(yd); else iys = float(IR1 - 1 - iy) / ScaleW + float(yd); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (s_RGB[0] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (s_RGB[1] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (s_RGB[2] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1] * (iys2 - iys) 
+ Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1]) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1]) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1]) / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<const int EpochT> __global__ void DataAugmentationInference(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *MTX, unsigned int *Flip, int epoch) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations for test images in the inference stage. This function can do Single-Crop and Multi-Crop inference based on the EpochTs value which contains the number of crops per image. If EpochTs is equal to 1 then this function will do a single crop-crop inference. If EpochTs >1 this function will do a multi-crop inference. The cropping locations and scales for each test image are stored in the MTX matrix. Each crop will be horizontally flipped with 0.5 probability. The Prediction for each test image is equal to the average predictions of all the crops stored in the MTX matrix. EpochTs is a control variable stored in "ControlVariables.h". Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. MTX:- input buffer that contains the cropping positions and amount of scaling applied to all images in the batch. Flip:- input buffer that contains one random value per image that is used to decide on horizontal flipping. epoch:- represents the crop number in multi-crop inference. XCrop:- output buffer to store a test batch of augmented images. 
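The sampling performed inside this kernel (and the other augmentation kernels in this file) is plain bilinear interpolation; a standalone sketch for a single channel, assuming the same layout index = row + col*H used by the Red, Green and Blue buffers, is:

       float sampleBilinear(const unsigned char *src, int H, float xs, float ys)
       {
           int x1 = (int)floorf(xs), x2 = (int)ceilf(xs);
           int y1 = (int)floorf(ys), y2 = (int)ceilf(ys);
           if (x1 == x2 && y1 == y2) return src[x1 + y1 * H];                                    // exact pixel hit
           if (y1 == y2) return src[x1 + y1 * H] * (x2 - xs) + src[x2 + y1 * H] * (xs - x1);     // interpolate along x only
           if (x1 == x2) return src[x1 + y1 * H] * (y2 - ys) + src[x1 + y2 * H] * (ys - y1);     // interpolate along y only
           float t1 = src[x1 + y1 * H] * (x2 - xs) + src[x2 + y1 * H] * (xs - x1);
           float t2 = src[x1 + y2 * H] * (x2 - xs) + src[x2 + y2 * H] * (xs - x1);
           return t1 * (y2 - ys) + t2 * (ys - y1);                                               // blend the two rows
       }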
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_flip, s_mtx[3]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) s_mtx[is] = MTX[n * 3 * EpochT + 3 * epoch + is]; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_height = Height[n]; s_width = Width[n]; s_start = Start[n]; s_flip = Flip[n]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = s_mtx[0]; Hs = Ws*(Hf / Wf); } else { Hs = s_mtx[0]; Ws = Hs*(Wf / Hf); } int xd = s_mtx[1]; int flip = s_flip % 10; int yd = s_mtx[2]; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); if (flip < 5) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<const int EpochT> __global__ void DataAugmentationInference2(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, int *MTX, unsigned int *Flip, int epoch) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is similar to the DataAugmentationInference() kernel with the addition of the Indx buffer which is generated earlier by the ReshuffleImagesMT() function to 
store the indices of batches of images from all tasks in a round robin order. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_flip, s_mtx[3], s_indx; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; s_flip = Flip[n]; } __syncthreads(); if (is < 3) s_mtx[is] = MTX[s_indx * 3 * EpochT + 3 * epoch + is]; __syncthreads(); if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = s_mtx[0]; Hs = Ws*(Hf / Wf); } else { Hs = s_mtx[0]; Ws = Hs*(Wf / Hf); } int xd = s_mtx[1]; int flip = s_flip % 10; int yd = s_mtx[2]; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); if (flip < 5) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int> __global__ void DataAugmentationValidate(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is a simplified version of DataAugmentation used with the validation 
images. A single central crop with size equal to 224/256 of the maximum square size in the image is used to calculate the validation error. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_indx; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; int n = blockIdx.y; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = IR1;//1.143f*IR1; // Hs = Ws*(Hf / Wf); } else { Hs = IR1;//1.143f*IR1; // Ws = Hs*(Wf / Hf); } int xd = (Hs - IR1) / 2; int yd = (Ws - IR1) / 2; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); iys = (iy + yd)*((Wf - 1) / (Ws - 1)); //if (flip == 0) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; 
XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1a(float *SMU, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel calcualtes the mean and variance per thread block. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- The size of each output channel. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- output buffer where this function stores all means and variances calculated per thread block. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE]; int is = threadIdx.x; int n = blockIdx.x + NumCh*blockIdx.y; int ig = is + n*ChSize; float temp, sum = 0, sum_sq = 0; while (ig < (n + 1)*ChSize) { temp = X[ig]; sum += temp; sum_sq += temp*temp; ig += BLOCKSIZE; } s1[is] = sum; s2[is] = sum_sq; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { SMU[2 * n] = s1[0]; SMU[2 * n + 1] = s2[0]; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1b(float *SMU, int Ch, int TotalChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel accumulates the means and variances per thread block calculated by BatchNormForward1a to calculate the mean and variance per output channel. The reason for this two stage calculation of the means and variances is caused by the layout of the output channels in the GPU memory used by the convolutional layer implementation of the cudnn.lib library. The layout is CNHW where the order of the tensor inner dimensions is Width, Height, N for image index and Channel. If the layout was NCHW the calculations of the means and variances can easily and efficiently be implemented in a single stage. Anyway splitting the calculation into two consecutive stages adds minimal overhead. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- Output buffer where this function calculates and stores a total of NumCh mean-variance pairs for each of the NumCh output channels. 
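Abstracting away the strided SMU layout, the per-channel combination step amounts to the following sketch (helper and variable names are illustrative):

       // partial[2b] and partial[2b+1] are the per-block sum and sum-of-squares produced by BatchNormForward1a.
       void combineChannelStats(const float *partial, int numBlocks, float totalChSize, float &mean, float &sd)
       {
           float sum = 0.0f, sum_sq = 0.0f;
           for (int b = 0; b < numBlocks; b++) { sum += partial[2 * b]; sum_sq += partial[2 * b + 1]; }
           mean = sum / totalChSize;
           sd = sqrtf(sum_sq / totalChSize - mean * mean + 0.0001f);   // same epsilon as the kernel
       }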
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < Ch) { int ix = 2 * is; int size = 2 * Ch*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * Ch; } float temp = TotalChSize; sum /= temp; SMU[2 * is] = sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward2(float *Y, float *X, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the means and variances calculated by BatchNormForward1a and BatchNormForward1b to apply batch normalization to the output channels. */ /**** Argument list***/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1]; Y[ig] = fmaxf(temp, 0); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward2(float *DParam, float *Derv, float *Param, float *SMU, float *DY, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is the first stage that propagates the error signal back through the batch normalization stage. DY contains the error signal at the output side of the BN stage. This kernel calculates the derivatives for the BN trainable parameters in DParam, and partially propagates the error signal back to the inputs of the BN stage and stores these intermediate values in Derv. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. 
X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0], beta = s_param[1]; int ig = is + n2*ChSize; float temp1, temp2, temp, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; float a = gamma*inv_sigma; float b1 = beta - a*mu; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; temp = a*temp2 + b1; if (temp>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward1(float *X, float *DY, float *Param, float *SMU, float *Derv, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel completes the back propagation of the error signal through the BN stage. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- input buffer which contains a total of NumCh pairs of intermediate values that will used by the this function to propagate back ther error signal to the inputs (X) of the BN stage. X :- output buffer where this function calculates and stores the error signal with respect to the inputs of the BN stage. 
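In scalar form, with m = BatchSize*ChSize and the two per-channel values Derv[2n] = gamma*sum(dy) and Derv[2n+1] = gamma*sum(dy*(x - mu)) produced by BatchNormBackward2, the gradient written back into X by this kernel is

       dx = (1 / sigma) * ( gamma*dy - Derv[2n] / m - (x - mu) * Derv[2n+1] / (m * sigma * sigma) )

which is the standard batch-normalization input gradient, where dy has already been zeroed by BatchNormBackward2 for elements that did not pass the ReLU.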
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_smu[2], s_derv[2], s_gamma; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_derv[is] = Derv[2 * n + is]; if (is == 0) { s_gamma = Param[2 * n]; } } __syncthreads(); if (ix < ChSize) { float temp; float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1]; float derv1 = s_derv[0], derv2 = s_derv[1], inv_m = 1.0f / (BatchSize*ChSize); int ig = (NumCh*b + n)*ChSize + ix; temp = inv_sigma*(s_gamma*DY[ig] - derv1*inv_m - (X[ig] - mu)*derv2*inv_m*inv_sigma*inv_sigma); X[ig] = temp; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward22(float *Y, float *X, float *Y0, bool *F, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForward22 is similar to BatchNormForward2, but it has an additional input Y0, which is an input from a residual connection. Also this kernel stores the sign of the output in F to be used in the backward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormForward22 is used instead of BatchNormForward2. */ /****Argument list****/ /* NumCh :- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize :- The size of each output channel. Param :- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. Y0 :- input buffer that contains the activations of the jump-ahead residual connections. SMU :- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. F :- output buffer that holds the signs of each output element in Y0 which will be used in the backward pass to propagate the error signal through the ReLUs. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1] + Y0[ig]; temp = fmaxf(temp, 0); Y[ig] = temp; F[ig] = (temp>0) ? 
1 : 0; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward22(float *DParam, float *Derv, float *Param, float *SMU, float *DY, bool *F, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormBackward22 is similar to BatchNormBackward2, but it has an additional input F, which is the sign of the BN output in forward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormBackward22 is used instead of BatchNormBackward2. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. F :- input buffer that is used to propagate the error signal back through the ReLU activation function. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0]; int ig = is + n2*ChSize; float temp1, temp2, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; if (F[ig]>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForwardT1b(float *SMU, float *SMUs, int NumCh, int count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForwardT1b is similar to BatchNormForward1b, but it has an extra output SMUs to accumulate the means and variances from all training images. This kernel will only be executed after the last training epoch. After training stops these accumulated values will averaged by AdjustFixedMeansStds. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. SMUs:- Output buffer where this function calculates and stores a total of NumCh fixed mean-variance pairs that will be used in the inference stage. 
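Taken together with AdjustFixedMeansStds below, the fixed inference statistics are formed as in this sketch (numBatches is an illustrative name):

       numBatches  = TrainSizeM / (NumTasks * BatchSize)           // batches seen during the final accumulation
       fixed_mu(c) = SMUs[2c] / numBatches                         // average of the per-batch means
       fixed_sd(c) = sqrtf(SMUs[2c + 1] / numBatches + 0.0001f)    // average of the per-batch variances, then sqrt

and these fixed values replace the per-batch statistics when batch normalization is run at inference time.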
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < NumCh) { int ix = 2 * is; int size = 2 * NumCh*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * NumCh; } float temp = count; sum /= temp; SMU[2 * is] = sum; SMUs[2 * is] += sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); SMUs[2 * is + 1] += temp; } } //============================================================================================================================================================ template<int> __global__ void AdjustFixedMeansStds(float *SMU, int NumCh, int TrainSizeM) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the accumulated values of means and variances calculated by BatchNormForwardT1b to calculate the fixed means and variances that will be used in the inference stage. */ /**** Argument list****/ /* NumCh:- Number of output channels. SMU:- input buffer that contains the accumulated means and variances for all training data. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int ig = threadIdx.x + blockIdx.x*blockDim.x; float temp = float(TrainSizeM / (NumTasks*BatchSize)); if (ig < NumCh) { float temp_value = SMU[ig] / temp; if (ig % 2 == 1) { temp_value = sqrtf(temp_value + 0.0001); } SMU[ig] = temp_value; } } //============================================================================================================================================================ template<int> __global__ void RGBrandPCA(float *RGBrand, float *rand1, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel calculates a set of 3 stochastic values per image to be added to the 3 RGB channels for the purpose of colour augmentation. For each random variable in the input buffer rand1 this kernel will calculate a corresponding stochastic value in RGBrand based on PCA analysis of the RGB pixel values of all the training set. */ /****Argument List****/ /* rand1:- input buffer of random values drawn from a normal distribution with zero mean and unit variance. RGBrand:- output buffer to store the 3 stochastic RGB offsets per image. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int ig = is + blockIdx.x*blockDim.x; if (ig < SIZE) { float alpha1 = rand1[3 * ig] * 6.9514; float alpha2 = rand1[3 * ig + 1] * 17.3739; float alpha3 = rand1[3 * ig + 2] * 305.65817; float vr1 = -0.4000, vr2 = -0.7061, vr3 = 0.58426; float vg1 = 0.80526, vg2 = 0.0336, vg3 = 0.59196; float vb1 = -0.4376, vb2 = 0.7073, vb3 = 0.55517; RGBrand[3 * ig] = vr1*alpha1 + vr2*alpha2 + vr3*alpha3; RGBrand[3 * ig + 1] = vg1*alpha1 + vg2*alpha2 + vg3*alpha3; RGBrand[3 * ig + 2] = vb1*alpha1 + vb2*alpha2 + vb3*alpha3; } } //============================================================================================================================================================
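/* A minimal host-side sketch of how RGBrandPCA might be fed each iteration, assuming a cuRAND
   generator and the buffer names d_rand1 / d_RGBrand (both illustrative, not taken from this
   project's host code):

       #include <curand.h>

       float *d_rand1, *d_RGBrand;                                  // 3 values per image each
       cudaMalloc(&d_rand1, 3 * BatchSize * sizeof(float));
       cudaMalloc(&d_RGBrand, 3 * BatchSize * sizeof(float));

       curandGenerator_t gen;
       curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
       curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);

       // N(0,1) samples; cuRAND requires an even count for normal generation,
       // which holds here for the usual even batch sizes.
       curandGenerateNormal(gen, d_rand1, 3 * BatchSize, 0.0f, 1.0f);

       // One thread per image maps the samples through the PCA directions.
       RGBrandPCA<0><<<(BatchSize + 255) / 256, 256>>>(d_RGBrand, d_rand1, BatchSize);

   The resulting d_RGBrand buffer is presumably what the RGB argument of DataAugmentation expects. */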
d9ed1c4198565d3f3caf859fb3cdf0cdff5f40a2.cu
#include"Header.h" //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ // This.cu file contains the implementation of core CUDA kernels required to implement a deep feed - forward convolutional network. //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ //============================================================================================================================================================ template<int> __global__ void MaxPoolingForward(float *s, float *c, int *Indx, int CRC, int SRC, int Src1, int Src2, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass for maxpooling. The input channel size is CRCxCRC, the pooling size is Src1xSrc1, the pooling stride is Src2xSrc2, and the output channel size is SRCxSRC where SRC = CRC/Src2. The index of the maximum output in each pooling square is stored in the Indx matrix to speed up the backward pass through the maxpooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. Src1:- Pooling size is Src1×Src1. Src2:- Pooling stride. CRC:- Input channel size is CRC×CRC (output channel size of previous stage before applying maxpooling). SRC:- Output channel size is SRC×SRC after applying pooling. c:- Input buffer that conatians all input channels (output channels of the previous layer). Indx:- Output buffer to store the positon of the maximum value in each pooling square to be used by MaxPoolingBackward in the backward pass. s:- Output buffer where this cuda kernel stores the all output channels after applying maximum pooling. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; int SN = SRC*SRC; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int icx = Src1 * (iss % SRC); int icy = Src1 * (iss / SRC); int is = iss + n*SN; int ic = (n*CRC + icy)*CRC + icx; float max = -9.9e+30f; int index; for (int i = -1; i < Src2 - 1; i++) for (int j = -1; j < Src2 - 1; j++) { int j1 = icx + j, i1 = icy + i, ix = ic + i*CRC + j; if (j1 >= 0 && i1 >= 0 && j1 < CRC && i1 < CRC && c[ix] > max) { index = ix; max = c[index]; } } s[is] = max; Indx[is] = index; } } //============================================================================================================================================================ template<int> __global__ void MaxPoolingBackward(float *c, float *s, int *Indx, int SN, int NumCh) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass for maxpooling. The only job of this cuda function is to propagate back the error signal through the maxpooling stage, there is no parameter update required because the maxpooling stage has no trainable parameters. The error signal will only be passed to the location of the maximum value in each pooling square using the Indx matrix which stores those maximum values. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. SN:- The channel size on the output side of the mapooling stage, SN = SRC×SRC. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the maxpooling stage. Indx:- Input buffer where the positon of the maximum value in each pooling square was stored in the forward pass by MaxPoolingForward. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the maxpooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int iss = blockIdx.x*blockDim.x + threadIdx.x; if (iss < SN) { int n = blockIdx.y + NumCh*blockIdx.z; int is = n*SN + iss; atomicAdd(c + Indx[is], s[is]); } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingForward(float *S, float *C, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward pass of the global average pooling stage used after the last convolutional layer. The input channel size is ChSize which is reduced to a single value that is equal to the average of all values in the channel. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. c:- Input buffer that conatians all input channels (output channels of the previous layer). s:- Output buffer where this cuda kernel stores the all output channels after applying global average pooling. 
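In scalar terms the forward/backward pair of this stage is, per channel:

       S = (1 / ChSize) * sum_i C[i]          // forward: emit the channel mean
       dC[i] = dS / ChSize                    // backward: dS/dC[i] = 1/ChSize, so the error is spread uniformly

which is why GlobalAvgPoolingBackward below needs no stored indices, unlike max pooling.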
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; float sum = 0.0f; while (ig1 < (n + 1)*ChSize) { sum += C[ig1]; ig1 += BLOCKSIZE; } s[is] = sum; __syncthreads(); int i = blockDim.x / 2; while (i > 0 && is + i < ChSize) { if (is < i) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) S[ig2] = s[0] / ChSize; } } //============================================================================================================================================================ template<const int BLOCKSIZE> __global__ void GlobalAvgPoolingBackward(float *C, float *S, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the backward pass of the global average pooling stage. This stage has no trainable parameters, and therefore this function only propagates back the error signal through the average pooling stage. */ /**** Argument List ****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- Input Channel size. s:- Input buffer that contains the error signal with respect to the activations of all channels on the output side of the average pooling stage. c:- Output buffer where this cuda kernel stores the error signal with respect to the activations of all channels on the input side of the average pooling stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_temp; int is = threadIdx.x; if (is < ChSize) { int n = blockIdx.x + NumCh*blockIdx.y; int b = blockIdx.y; int ig1 = n*ChSize + is; int ig2 = n + b; if (is == 0)s_temp = S[ig2]; __syncthreads(); float temp = s_temp / ChSize; while (ig1 < (n + 1)*ChSize) { C[ig1] = temp; ig1 += BLOCKSIZE; } } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void Softmax(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements both the forward and backward passes of the softmax function. Because the softmax stage is the last stage in the network, doing the backward propagation of the error signal immediately after doing the forward pass in the same cuda function is more efficient and adds minimal cost. The implementation is slightly complex because it takes into consideration the possibility of overflow, and the thread block size limitation to 1024 cuda threads per block. */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. 
mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the softmax function are stored and where this kernel stores the error signal at the input side of the softmax stage. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; float a = 1.0; while (ig < (n + 1)*SIZE) { if (max > 700){ a = (700 / max); tempy[k] *= a; } tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = a*temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void SoftmaxInference(float *ys, float *y, int *t, float *mse) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the softmax function used in the inference stage. This function is similar in structure to the Softmax cuda function used in the training phase. However, rather than propagating back the error signal, this function calculates and stores the whole output label for each test image in Ys which then can be analyzed to calculate the mean square error, the confusion matrix, and the classification rate of the test set. */ /**** Argument List ****/ /* t:- input buffer that contains the image labels. mse:- output variable to accumulate the mean square error of the test set. y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. 
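        A hedged usage sketch (launch parameters are assumptions, not taken from the original driver code): one thread block is
        launched per test image, with BLOCKSIZE threads cooperating on the SIZE softmax inputs of that image, e.g.
            SoftmaxInference<NumClasses, 1024><<<BatchSize, 1024>>>(ys, y, t, mse);
        where NumClasses and the block size of 1024 are placeholders; SIZE must equal the number of classes and BLOCKSIZE must
        match the launch block size.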
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0){ s_t = t[n]; } float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; max = fmaxf(max, tempy[k]); k++; ig += BLOCKSIZE; } s[is] = max; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { s[is] = fmaxf(s[is], s[is + i]); } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; while (ig < (n + 1)*SIZE) { if (max > 700)tempy[k] *= 700 / max; tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); ys[ig] += temp; if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void SoftmaxInference2(float *ys, float *y, int *t, int *Indx, float *mse) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is similar to the SoftmaxInference() kernel with the addition of the Indx buffer which is generated earlier by the ReshuffleImagesMT() function to store the indices of batches of images from all tasks in a round robin order. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ double sD[BLOCKSIZE]; __shared__ int s_t, s_indx1; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_indx1 = indx1; s_t = t[indx1]; } __syncthreads(); int ig2 = s_indx1*SIZE + is; float tempy[8], max = -9.9e+30f, mse1; double tempyD[8], sum; int k = 0; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; max = fmaxf(max, tempy[k]); k++; ig += BLOCKSIZE; } s[is] = max; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { s[is] = fmaxf(s[is], s[is + i]); } __syncthreads(); i /= 2; } max = s[0]; //---------------------------------------------------------------------------------------------- ig = ig1; sum = 0.0; k = 0; while (ig < (n + 1)*SIZE) { if (max > 700)tempy[k] *= 700 / max; tempyD[k] = exp(double(tempy[k])); sum += tempyD[k]; k++; ig += BLOCKSIZE; } sD[is] = sum; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) sD[is] += sD[is + i]; __syncthreads(); i /= 2; } sum = sD[0]; //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(tempyD[k] / (sum + 2.0e-20)); ys[ig2] += temp; if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; mse1 += temp * temp; k++; ig += BLOCKSIZE; ig2 += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoid(float *y, int *t, int *Indx, float *mse, float *count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the forward and backward passes of the log sigmoid function 1/(1+exp(-x)). */ /**** Argument List ****/ /* Indx:- input buffer that stores the indices or locations of the images in the current batch. t:- input buffer that contains the image labels. count:- output variable to store the total number of images that were correctly classified for the training set or validation set. mse:- output variable to accumulate the mean square error of the training set or validation set y:- input/output buffer where the inputs to the log sigmoid function are stored and where this kernel stores the error signal at the input side of the softmax stage. 
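        In equation form (a restatement of the code below, not an addition to the algorithm): for each output unit the forward
        value is y = 1/(1+exp(-x)), the target is 1 for the true class and 0 otherwise, and the value written back into y is the
        error term (y - target), whose squared sum per image is accumulated into mse. One thread block is launched per image in
        the batch, and the classification count is based on the position of the maximum raw input.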
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s[BLOCKSIZE]; __shared__ int indx[BLOCKSIZE], s_t; int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is, ig1 = ig; if (is == 0) { int indx1 = Indx[n]; s_t = t[indx1]; } float tempy[8], max = -9.9e+30f, mse1; int k = 0, ix; while (ig < (n + 1)*SIZE) { tempy[k] = y[ig]; if (tempy[k]>max){ max = tempy[k]; ix = k; } k++; ig += BLOCKSIZE; } s[is] = max; indx[is] = is + BLOCKSIZE*ix; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) { if (s[is + i] > s[is]) { s[is] = s[is + i]; indx[is] = indx[is + i]; } } __syncthreads(); i /= 2; } //---------------------------------------------------------------------------------------------- ig = ig1; mse1 = 0.0f; k = 0; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-tempy[k]))); if (is + k*BLOCKSIZE == s_t) temp -= 1.0f; y[ig] = temp; mse1 += temp * temp; k++; ig += BLOCKSIZE; } s[is] = mse1; __syncthreads(); i = BLOCKSIZE / 2; while (i > 0) { if (is < i && is + i < SIZE) s[is] += s[is + i]; __syncthreads(); i /= 2; } if (is == 0) { mse[n] += s[0]; if (indx[0] == s_t) atomicAdd(count, 1.0f); } } //============================================================================================================================================================ template < const int SIZE, const int BLOCKSIZE > __global__ void LogSigmoidInference(float *ys, float *y) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements the log sigmoid function 1/(1+exp(-x)) used in the Inference stage. It stores the whole output label of each test image in ys. */ /**** Argument List ****/ /* y:- input buffer where the inputs to the softmax function are stored. ys:- output buffer where this kernel stores the whole predicted labels of the test images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int n = blockIdx.x; int ig = n*SIZE + is; while (ig < (n + 1)*SIZE) { float temp = float(1.0f / (1.0f + expf(-y[ig]))); ys[ig] += temp; ig += BLOCKSIZE; } } //============================================================================================================================================================ template < int> __global__ void Add_Mtx(float *c, float *a, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is an auxiliary cuda function that adds two GPU matrices c = c + a. */ /**** Argument List ****/ /* a:- input matrix. c:- input/output matrix to store c = c + a.. SIZE:- size of the input/output matrices. 
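        A hedged usage sketch (launch parameters are assumptions): the kernel uses a grid-stride loop, so any reasonable
        grid/block configuration covers the whole matrix, e.g.
            Add_Mtx<0><<<128, 256>>>(c, a, SIZE);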
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] += a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_SGD_WDecay(float *c, float *a, float lr, float lmda, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the derivatives in matrix dw. The upate equation implements steepest gradient decent with L2 regularization (weight decay). */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. dw:- input buffer that contains the derivatives. w:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { c[i] = (1 - lr*lmda)*c[i] - lr * a[i]; i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop1(float *w, float *v, float *dw, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel updates the parameters in matrix w with the current derivatives in matrix dw and the running averages of the derivatives in matrix v. The update equation implements Root Mean Square Propagation (RMSprop) with L2 regularization (weight decay). The initialization of the running average of the derivatives based on the time step (iteration number) is borrowed from the Adam algorithm. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dw:- input buffer that contains the derivatives. v:- input/output buffer that maintains the running average of the squared derivative per parameter. w:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. 
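        In equation form (a restatement of the code below): with gamma = 0.999 and g the current derivative of a parameter,
            v  <-  gamma*v + (1 - gamma)*g*g
            m   =  v / (1 - gamma^iter)                 (bias correction borrowed from Adam)
            w  <-  (1 - lr*lmda)*w - lr*g / (sqrt(m) + 1e-8)
        applied independently to every parameter through a grid-stride loop.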
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x*gridDim.x; while (i < SIZE) { float gamma = 0.999f; v[i] = gamma*v[i] + (1 - gamma)*dw[i] * dw[i]; float m = v[i] / (1 - powf(gamma, float(iter))); w[i] = (1 - lr*lmda)*w[i] - lr* dw[i] / (sqrtf(m) + 0.00000001); i += stride; } } //============================================================================================================================================================ template < int> __global__ void Update_RMSprop2(float *A, float *V, float *DA, float lr, float lmda, int SIZE, int iter) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* Update_RMSprop2 is similar to Update_RMSprop1, but it is used for smaller matrices. */ /**** Argument List ****/ /* lr:- learning rate. lmda:- weight decay parameter. iter:- current training iteration. dA:- input buffer that contains the derivatives. V:- input/output buffer that maintains the running average of the squared derivative per parameter. A:- input/output buffer to store the updated values of the trainable parameters in matrix w. SIZE:- size of the input/output matrices. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < SIZE) { float gamma = 0.999f; V[i] = gamma*V[i] + (1 - gamma)*DA[i] * DA[i]; float m = V[i] / (1 - powf(gamma, float(iter))); A[i] = (1 - lr*lmda)*A[i] - lr* DA[i] / (sqrtf(m) + 0.00000001); } } //============================================================================================================================================================ template<int> __global__ void DataAugmentation(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, unsigned int *Crop, float *RGB, float *Cropf) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations which is applied at the start of each training iteration. The augmentation is applied to a batch of images where the Red matrix stores the Red channels for all images, Green matrix stores the Green channels for all images, and the Blue matrix stores the Blue channels for all images. The Height and Width matrices store the height and width of each image in the batch, and the Start matrix stores the starting address of each image in the Red, Green, and Blue buffers. The Indx matrix stores the indices of the images in the current batch selected by the reshuffle algorithm. This function crops a random rectangular with size randomly selected to be between 8% and 100% of the image size and with aspect ratio randomly selected to be between 3/4 and 4/3. Then the cropped rectangular is fitted to the square window size of the network. Random horizontal flipping and color augmentation is also added. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. 
Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. Crop:- input buffer that contains integer random values used to choose the cropping position for each image, and decide on horizontal flipping. RGB:- input buffer that contains 3 random values per image each added to one of the RGB channels for colour augmentation. Cropf:- input buffer that contains 2 random values per image, one decides the amount of scaling, and the other decides the amount of change to the aspect ratio. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_crop[3], s_height, s_width, s_indx; __shared__ float s_RGB[3], s_cropf[2]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) { s_crop[is] = Crop[3 * n + is]; s_RGB[is] = RGB[3 * n + is]; if (is < 2) s_cropf[is] = Cropf[2 * n + is]; } if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hc, Wc; size_t start = s_start; float a = 0.08f + s_cropf[0] * (1.0f - 0.08f); //float a = 0.1914 + s_cropf[0] * (0.765625 - 0.1914);// float minHW = fminf(float(H), float(W)); float smax = fminf(1.3333333f, (W*W) / (minHW*minHW*a)); float smin = fmaxf(0.75f, (minHW*minHW*a) / (H*H)); float s = smin + s_cropf[1] * (smax - smin); Wc = int(minHW*sqrtf(a*s)); Hc = int(minHW*sqrtf(a / s)); float ScaleH = float(IR1 - 1) / float(Hc - 1); float ScaleW = float(IR1 - 1) / float(Wc - 1); int xd = s_crop[0] % (H + 1 - Hc); int yd = s_crop[1] % (W + 1 - Wc); int flip = s_crop[2] % 10; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = float(ix) / ScaleH + float(xd); if (flip < 5) iys = float(iy) / ScaleW + float(yd); else iys = float(IR1 - 1 - iy) / ScaleW + float(yd); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (s_RGB[0] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (s_RGB[1] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (s_RGB[2] + t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1] * (iys2 - iys) 
+ Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = (s_RGB[0] + Red[ia1]) / 255.0f - 0.5f; XCrop[ic + ISize1] = (s_RGB[1] + Green[ia1]) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (s_RGB[2] + Blue[ia1]) / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<const int EpochT> __global__ void DataAugmentationInference(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *MTX, unsigned int *Flip, int epoch) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements data augmentations for test images in the inference stage. This function can do Single-Crop and Multi-Crop inference based on the EpochTs value which contains the number of crops per image. If EpochTs is equal to 1 then this function will do a single crop-crop inference. If EpochTs >1 this function will do a multi-crop inference. The cropping locations and scales for each test image are stored in the MTX matrix. Each crop will be horizontally flipped with 0.5 probability. The Prediction for each test image is equal to the average predictions of all the crops stored in the MTX matrix. EpochTs is a control variable stored in "ControlVariables.h". Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. MTX:- input buffer that contains the cropping positions and amount of scaling applied to all images in the batch. Flip:- input buffer that contains one random value per image that is used to decide on horizontal flipping. epoch:- represents the crop number in multi-crop inference. XCrop:- output buffer to store a test batch of augmented images. 
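        A hedged usage sketch (inferred from the indexing below, not taken from the original driver code): the kernel appears to
        be launched with a 2D grid of ceil(ISize1/threads) x BatchSize blocks, one grid row per test image, e.g.
            dim3 grid((ISize1 + 255) / 256, BatchSize);
            DataAugmentationInference<EpochTs><<<grid, 256>>>(XCrop, Red, Green, Blue, Height, Width, Start, MTX, Flip, epoch);
        where the block size of 256 is an assumption.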
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_flip, s_mtx[3]; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is < 3) s_mtx[is] = MTX[n * 3 * EpochT + 3 * epoch + is]; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; if (is == 0) { s_height = Height[n]; s_width = Width[n]; s_start = Start[n]; s_flip = Flip[n]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = s_mtx[0]; Hs = Ws*(Hf / Wf); } else { Hs = s_mtx[0]; Ws = Hs*(Wf / Hf); } int xd = s_mtx[1]; int flip = s_flip % 10; int yd = s_mtx[2]; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); if (flip < 5) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<const int EpochT> __global__ void DataAugmentationInference2(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx, int *MTX, unsigned int *Flip, int epoch) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is similar to the DataAugmentationInference() kernel with the addition of the Indx buffer which is generated earlier by the ReshuffleImagesMT() function to 
store the indices of batches of images from all tasks in a round robin order. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_flip, s_mtx[3], s_indx; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; int n = blockIdx.y; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; s_flip = Flip[n]; } __syncthreads(); if (is < 3) s_mtx[is] = MTX[s_indx * 3 * EpochT + 3 * epoch + is]; __syncthreads(); if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = s_mtx[0]; Hs = Ws*(Hf / Wf); } else { Hs = s_mtx[0]; Ws = Hs*(Wf / Hf); } int xd = s_mtx[1]; int flip = s_flip % 10; int yd = s_mtx[2]; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); if (flip < 5) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int> __global__ void DataAugmentationValidate(float *XCrop, unsigned char *Red, unsigned char *Green, unsigned char *Blue, unsigned int * Height, unsigned int *Width, size_t *Start, int *Indx) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is a simplified version of DataAugmentation used with the validation 
images. A single central crop with size equal to 224/256 of the maximum square size in the image is used to calculate the validation error. Bilinear interpolation is used for scaling. */ /**** Argument List ****/ /* Height:- input buffer that holds the height of all images in the batch. Width:- input buffer that holds the width of all images in the batch. Start:- input buffer that holds the starting position of all images in the batch. Red:- input buffer where the red input channels for all images in the batch are stored. Green:- input buffer where the green input channels for all images in the batch are stored. Blue:- input buffer where the blue input channels for all images in the batch are stored. Indx:- input buffer that contains the indices of the images in the current batch selected by the reshuffle algorithm. XCrop:- output buffer to store a batch of data augmented images. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ unsigned int s_height, s_width, s_indx; __shared__ size_t s_start; int is = threadIdx.x; int ii = is + blockIdx.x*blockDim.x; if (ii < ISize1) { int ix = ii % IR1; int iy = ii / IR1; int n = blockIdx.y; if (is == 0) { s_indx = Indx[n]; s_height = Height[s_indx]; s_width = Width[s_indx]; s_start = Start[s_indx]; } __syncthreads(); int H = s_height; int W = s_width; int Hs, Ws; float Hf = H, Wf = W; size_t start = s_start; if (H > W) { Ws = IR1;//1.143f*IR1; // Hs = Ws*(Hf / Wf); } else { Hs = IR1;//1.143f*IR1; // Ws = Hs*(Wf / Hf); } int xd = (Hs - IR1) / 2; int yd = (Ws - IR1) / 2; int ic = ix + IR1*iy + 3 * n*ISize1; float ixs, iys; int ixs1, ixs2, iys1, iys2; ixs = (ix + xd)*((Hf - 1) / (Hs - 1)); iys = (iy + yd)*((Wf - 1) / (Ws - 1)); //if (flip == 0) iys = (iy + yd)*((Wf - 1) / (Ws - 1)); else iys = (IR1 - 1 - iy + yd)*((Wf - 1) / (Ws - 1)); //else iys = (Ws - 1 - iy - yd)*((Wf - 1) / (Ws - 1)); ixs1 = floorf(ixs); ixs2 = ceilf(ixs); iys1 = floorf(iys); iys2 = ceilf(iys); if (iys1 < iys2) { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; int ia3 = ixs1 + iys2*H + start; int ia4 = ixs2 + iys2*H + start; float t1 = Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1); float t2 = Red[ia3] * (ixs2 - ixs) + Red[ia4] * (ixs - ixs1); XCrop[ic] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1); t2 = Green[ia3] * (ixs2 - ixs) + Green[ia4] * (ixs - ixs1); XCrop[ic + ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; t1 = Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1); t2 = Blue[ia3] * (ixs2 - ixs) + Blue[ia4] * (ixs - ixs1); XCrop[ic + 2 * ISize1] = (t1*(iys2 - iys) + t2*(iys - iys1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs1 + iys2*H + start; XCrop[ic] = (Red[ia1] * (iys2 - iys) + Red[ia2] * (iys - iys1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (iys2 - iys) + Green[ia2] * (iys - iys1)) / 255.0f - 0.5f;; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (iys2 - iys) + Blue[ia2] * (iys - iys1)) / 255.0f - 0.5f; } } else { if (ixs1 < ixs2) { int ia1 = ixs1 + iys1*H + start; int ia2 = ixs2 + iys1*H + start; XCrop[ic] = (Red[ia1] * (ixs2 - ixs) + Red[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + ISize1] = (Green[ia1] * (ixs2 - ixs) + Green[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = (Blue[ia1] * (ixs2 - ixs) + Blue[ia2] * (ixs - ixs1)) / 255.0f - 0.5f; } else { int ia1 = ixs1 + iys1*H + start; 
XCrop[ic] = Red[ia1] / 255.0f - 0.5f; XCrop[ic + ISize1] = Green[ia1] / 255.0f - 0.5f; XCrop[ic + 2 * ISize1] = Blue[ia1] / 255.0f - 0.5f; } } } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1a(float *SMU, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel calcualtes the mean and variance per thread block. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. ChSize:- The size of each output channel. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- output buffer where this function stores all means and variances calculated per thread block. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE]; int is = threadIdx.x; int n = blockIdx.x + NumCh*blockIdx.y; int ig = is + n*ChSize; float temp, sum = 0, sum_sq = 0; while (ig < (n + 1)*ChSize) { temp = X[ig]; sum += temp; sum_sq += temp*temp; ig += BLOCKSIZE; } s1[is] = sum; s2[is] = sum_sq; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { SMU[2 * n] = s1[0]; SMU[2 * n + 1] = s2[0]; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward1b(float *SMU, int Ch, int TotalChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel accumulates the means and variances per thread block calculated by BatchNormForward1a to calculate the mean and variance per output channel. The reason for this two stage calculation of the means and variances is caused by the layout of the output channels in the GPU memory used by the convolutional layer implementation of the cudnn.lib library. The layout is CNHW where the order of the tensor inner dimensions is Width, Height, N for image index and Channel. If the layout was NCHW the calculations of the means and variances can easily and efficiently be implemented in a single stage. Anyway splitting the calculation into two consecutive stages adds minimal overhead. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize. TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- Output buffer where this function calculates and stores a total of NumCh mean-variance pairs for each of the NumCh output channels. 
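        A hedged usage sketch (launch configuration is an assumption): this kernel only needs one thread per output channel,
        e.g.
            BatchNormForward1b<256><<<(NumCh + 255) / 256, 256>>>(SMU, NumCh, ChSize * BatchSize);
        where NumCh is the number of output channels per image and ChSize*BatchSize the total channel size across the batch.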
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < Ch) { int ix = 2 * is; int size = 2 * Ch*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * Ch; } float temp = TotalChSize; sum /= temp; SMU[2 * is] = sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward2(float *Y, float *X, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the means and variances calculated by BatchNormForward1a and BatchNormForward1b to apply batch normalization to the output channels. */ /**** Argument list***/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1]; Y[ig] = fmaxf(temp, 0); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward2(float *DParam, float *Derv, float *Param, float *SMU, float *DY, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel is the first stage that propagates the error signal back through the batch normalization stage. DY contains the error signal at the output side of the BN stage. This kernel calculates the derivatives for the BN trainable parameters in DParam, and partially propagates the error signal back to the inputs of the BN stage and stores these intermediate values in Derv. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. 
X :- input buffer that contains the activations of all output channels before applying BN. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0], beta = s_param[1]; int ig = is + n2*ChSize; float temp1, temp2, temp, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; float a = gamma*inv_sigma; float b1 = beta - a*mu; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; temp = a*temp2 + b1; if (temp>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward1(float *X, float *DY, float *Param, float *SMU, float *Derv, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel completes the back propagation of the error signal through the BN stage. */ /****Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- input buffer which contains a total of NumCh pairs of intermediate values that will used by the this function to propagate back ther error signal to the inputs (X) of the BN stage. X :- output buffer where this function calculates and stores the error signal with respect to the inputs of the BN stage. 
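        In equation form (a restatement of the code in BatchNormBackward1 below): with m = BatchSize*ChSize, mu and sigma the
        stored channel statistics, and (d1, d2) the two per-channel sums produced by BatchNormBackward2, each input element
        receives
            dX = (1/sigma) * ( gamma*dY - d1/m - (X - mu)*d2 / (m*sigma^2) )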
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_smu[2], s_derv[2], s_gamma; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_derv[is] = Derv[2 * n + is]; if (is == 0) { s_gamma = Param[2 * n]; } } __syncthreads(); if (ix < ChSize) { float temp; float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1]; float derv1 = s_derv[0], derv2 = s_derv[1], inv_m = 1.0f / (BatchSize*ChSize); int ig = (NumCh*b + n)*ChSize + ix; temp = inv_sigma*(s_gamma*DY[ig] - derv1*inv_m - (X[ig] - mu)*derv2*inv_m*inv_sigma*inv_sigma); X[ig] = temp; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForward22(float *Y, float *X, float *Y0, bool *F, float *SMU, float *Param, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForward22 is similar to BatchNormForward2, but it has an additional input Y0, which is an input from a residual connection. Also this kernel stores the sign of the output in F to be used in the backward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormForward22 is used instead of BatchNormForward2. */ /****Argument list****/ /* NumCh :- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize :- The size of each output channel. Param :- A buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. Y0 :- input buffer that contains the activations of the jump-ahead residual connections. SMU :- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. F :- output buffer that holds the signs of each output element in Y0 which will be used in the backward pass to propagate the error signal through the ReLUs. Y :- output buffer where this function stores the normalized activations of all output channels. */ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s_param[2], s_smu[2]; int is = threadIdx.x; int ix = blockIdx.x*BLOCKSIZE + is; int n = blockIdx.y; int b = blockIdx.z; if (is < 2) { s_param[is] = Param[2 * n + is]; s_smu[is] = SMU[2 * n + is]; } __syncthreads(); if (ix < ChSize) { int ig = (NumCh*b + n)*ChSize + ix; float temp = (X[ig] - s_smu[0]) / s_smu[1]; temp = s_param[0] * temp + s_param[1] + Y0[ig]; temp = fmaxf(temp, 0); Y[ig] = temp; F[ig] = (temp>0) ? 
1 : 0; } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormBackward22(float *DParam, float *Derv, float *Param, float *SMU, float *DY, bool *F, float *X, int NumCh, int ChSize) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormBackward22 is similar to BatchNormBackward2, but it has an additional input F, which is the sign of the BN output in forward pass. Therefore, when the current stage (Layer) has an additional input coming from a previous stage through a residual connection, BatchNormBackward22 is used instead of BatchNormBackward2. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize ChSize:- The size of each output channel. Param:- input buffer that contains the BN trainable parameters beta and gamma. There is a total of NumCh beta-gamma pairs, one for each of the NumCh output channels. X :- input buffer that contains the activations of all output channels before applying BN. F :- input buffer that is used to propagate the error signal back through the ReLU activation function. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. DY :- input buffer that contains the error signal at the outputs of the BN stage. The derivatives of the lost function with respect to the outputs of BN. Derv:- output buffer where this function calculates and stores a total of NumCh pairs of intermediate values that will used by the next stage to propagate back the error signal to the inputs of the BN stage. DParam:- output buffer where this function calculates and stores the derivatives of beta and gamma, the trainable parameters of BN. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- __shared__ float s1[BLOCKSIZE], s2[BLOCKSIZE], s3[BLOCKSIZE]; __shared__ float s_smu[2], s_param[2]; int is = threadIdx.x; int n = blockIdx.x; int b = blockIdx.y; int n2 = n + NumCh*b; if (is < 2) { s_smu[is] = SMU[2 * n + is]; s_param[is] = Param[2 * n + is]; } __syncthreads(); float mu = s_smu[0], inv_sigma = 1.0f / s_smu[1], gamma = s_param[0]; int ig = is + n2*ChSize; float temp1, temp2, sum1 = 0.0f, sum2 = 0.0f, sum3 = 0.0f; while (ig < (n2 + 1)*ChSize) { temp2 = X[ig]; if (F[ig]>0) { temp1 = DY[ig]; sum1 += temp1; sum2 += temp1*(temp2 - mu) * inv_sigma; sum3 += temp1*(temp2 - mu); } else { DY[ig] = 0; } ig += BLOCKSIZE; } s1[is] = sum1; s2[is] = sum2; s3[is] = sum3; __syncthreads(); int i = BLOCKSIZE / 2; while (i > 0) { if (is < i) { s1[is] += s1[is + i]; s2[is] += s2[is + i]; s3[is] += s3[is + i]; } __syncthreads(); i /= 2; } if (is == 0) { atomicAdd(DParam + 2 * n, s2[0]); atomicAdd(DParam + 2 * n + 1, s1[0]); atomicAdd(Derv + 2 * n, gamma*s1[0]); atomicAdd(Derv + 2 * n + 1, gamma*s3[0]); } } //============================================================================================================================================================ template<int BLOCKSIZE> __global__ void BatchNormForwardT1b(float *SMU, float *SMUs, int NumCh, int count) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* BatchNormForwardT1b is similar to BatchNormForward1b, but it has an extra output SMUs to accumulate the means and variances from all training images. This kernel will only be executed after the last training epoch. After training stops these accumulated values will averaged by AdjustFixedMeansStds. */ /**** Argument list****/ /* NumCh:- Number of output channels per image. Total number of channels in a convolutional layer is NumCh*BatchSize TotalChSize:- The size of each output channel across all images in the batch TotalChSize = ChSize*BatchSize. SMU:- input buffer that contains a means-variance pair per output channel, and each pair will be used to normalize the activations of the corresponding output channel. SMUs:- Output buffer where this function calculates and stores a total of NumCh fixed mean-variance pairs that will be used in the inference stage. 
*/ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = blockIdx.x*blockDim.x + threadIdx.x; if (is < NumCh) { int ix = 2 * is; int size = 2 * NumCh*BatchSize; float sum = 0.0f, sum_sq = 0.0f; while (ix < size) { sum += SMU[ix]; sum_sq += SMU[ix + 1]; ix += 2 * NumCh; } float temp = count; sum /= temp; SMU[2 * is] = sum; SMUs[2 * is] += sum; temp = sum_sq / temp - sum*sum; SMU[2 * is + 1] = sqrtf(temp + 0.0001); SMUs[2 * is + 1] += temp; } } //============================================================================================================================================================ template<int> __global__ void AdjustFixedMeansStds(float *SMU, int NumCh, int TrainSizeM) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel uses the accumulated values of means and variances calculated by BatchNormForward1b, to calculated the fixed means and variances that will be used in the inference stage. */ /**** Argument list****/ /* NumCh:- Number of output channels. SMU:- input buffer that conatins the accumulated means and variances for all training data. */ //------------------------------------------------------------------------------------------------------------------------------------------------- int ig = threadIdx.x + blockIdx.x*blockDim.x; float temp = float(TrainSizeM / (NumTasks*BatchSize)); if (ig < NumCh) { float temp_value = SMU[ig] / temp; if (ig % 2 == 1) { temp_value = sqrtf(temp_value + 0.0001); } SMU[ig] = temp_value; } } //============================================================================================================================================================ template<int> __global__ void RGBrandPCA(float *RGBrand, float *rand1, int SIZE) { //------------------------------------------------------------------------------------------------------------------------------------------------- /* This cuda kernel implements calculates a set of 3 stochastic values per image to be added to the 3 RGB channels for the purpose of colour augmentation. For each random variable in the input buffer rand1 this kernel will calculate a corresponding stochastic value in RGBrand based on PCA analysis of the RGB pixel values of all the training set. */ /****Argument List****/ /* rand1:- input buffer of random values drawn from a normal distribution with zero mean and unity variance. RGBrand:- output buffer to store the */ //------------------------------------------------------------------------------------------------------------------------------------------------- int is = threadIdx.x; int ig = is + blockIdx.x*blockDim.x; if (ig < SIZE) { float alpha1 = rand1[3 * ig] * 6.9514; float alpha2 = rand1[3 * ig + 1] * 17.3739; float alpha3 = rand1[3 * ig + 2] * 305.65817; float vr1 = -0.4000, vr2 = -0.7061, vr3 = 0.58426; float vg1 = 0.80526, vg2 = 0.0336, vg3 = 0.59196; float vb1 = -0.4376, vb2 = 0.7073, vb3 = 0.55517; RGBrand[3 * ig] = vr1*alpha1 + vr2*alpha2 + vr3*alpha3; RGBrand[3 * ig + 1] = vg1*alpha1 + vg2*alpha2 + vg3*alpha3; RGBrand[3 * ig + 2] = vb1*alpha1 + vb2*alpha2 + vb3*alpha3; } } //============================================================================================================================================================
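//-------------------------------------------------------------------------------------------------------------------------------------------------
/*
   Hedged host-side sketch (not part of the original source): one possible way the two-stage batch-normalization forward pass
   defined above could be chained. Buffer names, the block size of 256, and the exact grid mapping are assumptions inferred
   from the indexing used inside the kernels; the real driver code may differ.

       dim3 grid1(NumCh, BatchSize);                                                          // one block per (channel, image) pair
       BatchNormForward1a<256><<<grid1, 256>>>(SMU, X, NumCh, ChSize);                        // per-block partial sums
       BatchNormForward1b<256><<<(NumCh + 255) / 256, 256>>>(SMU, NumCh, ChSize * BatchSize); // per-channel mean and std
       dim3 grid2((ChSize + 255) / 256, NumCh, BatchSize);
       BatchNormForward2<256><<<grid2, 256>>>(Y, X, SMU, Param, NumCh, ChSize);               // normalize, scale/shift, ReLU
*/
//============================================================================================================================================================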
0c64b9fdc9b82b962a8678598651636122518dcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "helper_functions.h" #include <stdio.h> #include "IC_GN.cuh" __global__ void computeICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, float m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, int m_iNumberY, int m_iNumberX, int m_iSubsetH, int m_iSubsetW, int m_iWidth, int m_iHeight, int m_iSubsetY, int m_iSubsetX, int m_iMaxiteration, float* output_dP, int* dm_iIterationNum) /*Input: all the const variables Output: deformation P matrix Strategy: Each block compute one of the 21*21 POIs, and within each block 32*32 threads compute other needed computations */ { unsigned int blockID = blockIdx.y * gridDim.x + blockIdx.x; //Row * dim + Col unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; // Shared memory for the shared variables of each block __shared__ float m_dU, m_dV, m_dUx, m_dUy, m_dVx, m_dVy; //Initial guess __shared__ float m_dDU, m_dDV, m_dDUx, m_dDUy, m_dDVx, m_dDVy; __shared__ float m_dSubAveR, m_dSubNorR, m_dSubAveT, m_dSubNorT; //Used for atomicAdd __shared__ float m_dP[6], m_dDP[6], m_dPXY[2], m_dNumerator[6]; __shared__ float m_dWarp[3][3]; __shared__ float m_dHessian[6][6], m_dInvHessian[6][6]; __shared__ float m_dTemp; //__shared__ int m_iIteration; // Local memory for each thread use float m_dSubsetR, m_dSubsetAveR, m_dSubsetT, m_dSubsetAveT; float m_dJacobian[2][6], m_dRDescent[6], m_dHessianXY[6][6]; int m_iTemp, m_iTempX, m_iTempY; float m_dTempX, m_dTempY; float m_dWarpX, m_dWarpY; float m_dError; //Load the shared variables to shared memory if(threadIdx.x ==0 && threadIdx.y==0){ m_dU = float(input_iU[blockID]); m_dDU = 0.0; m_dV = float(input_iV[blockID]); m_dDV = 0.0; m_dUx = 0.0; m_dUx = 0.0; m_dUy = 0.0; m_dUy = 0.0; m_dVx = 0.0; m_dVx = 0.0; m_dVy = 0.0; m_dVy = 0.0; m_dP[0] = m_dU; m_dP[3] = m_dV; m_dP[1] = m_dUx; m_dP[4] = m_dVx; m_dP[2] = m_dUy; m_dP[5] = m_dVy; m_dWarp[0][0] = 1 + m_dUx; m_dWarp[0][1] = m_dUy; m_dWarp[0][2] = m_dU; m_dWarp[1][0] = m_dVx; m_dWarp[1][1] = 1 + m_dVy; m_dWarp[1][2] = m_dV; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; m_dPXY[0] = input_dPXY[blockID*2+0]; m_dPXY[1] = input_dPXY[blockID*2+1]; m_dSubAveR=0.0; m_dSubNorR=0.0; m_dSubAveT=0.0; m_dSubNorT=0.0; dm_iIterationNum[blockID] = 20; } if(tx<6 && ty<6){ if(tx == ty){ m_dInvHessian[ty][tx] = 1.0; m_dHessian[ty][tx] = 0.0; m_dNumerator[tx] = 0.0; m_dDP[tx] = 0.0; } else{ m_dInvHessian[ty][tx] = 0.0; m_dHessian[ty][tx] = 0.0; m_dNumerator[tx] = 0.0; m_dDP[tx] = 0.0; } } __syncthreads(); // Evaluate the Jacbian dW/dp at (x, 0); m_dJacobian[0][0] = 1; m_dJacobian[0][1] = tx - m_iSubsetX; m_dJacobian[0][2] = ty - m_iSubsetY; m_dJacobian[0][3] = 0; m_dJacobian[0][4] = 0; m_dJacobian[0][5] = 0; m_dJacobian[1][0] = 0; m_dJacobian[1][1] = 0; m_dJacobian[1][2] = 0; m_dJacobian[1][3] = 1; m_dJacobian[1][4] = tx - m_iSubsetX; m_dJacobian[1][5] = ty - m_iSubsetY; //Compute the steepest descent image and Hessian matrix for(unsigned int i=0; i<6; i++){ m_dRDescent[i] = input_mdRx[int(m_dPXY[0] - m_iSubsetY+ty)*m_iWidth+int(m_dPXY[1] - m_iSubsetX+tx)] * m_dJacobian[0][i] + input_mdRy[int(m_dPXY[0] - m_iSubsetY+ty)*m_iWidth+int(m_dPXY[1] - m_iSubsetX+tx)] * m_dJacobian[1][i]; } for(unsigned int i=0; i<6; i++){ for(unsigned int j=0; j<6; j++){ m_dHessianXY[i][j] = m_dRDescent[i] * m_dRDescent[j]; 
m_dHessian[i][j] += m_dHessianXY[i][j]; } } //Fill the intensity values in the subset R m_dSubsetR = input_mdR[int(m_dPXY[0] - m_iSubsetY + ty)*m_iWidth+int(m_dPXY[1] - m_iSubsetX + tx)]; __syncthreads(); atomicAdd(&m_dSubAveR, (m_dSubsetR/float(m_iSubsetH*m_iSubsetW))); __syncthreads(); m_dSubsetAveR = m_dSubsetR - m_dSubAveR; __syncthreads(); atomicAdd(&m_dSubNorR, pow(m_dSubsetAveR,2)); __syncthreads(); //Invert the Hessian matrix using the first 36 threads if(tx ==0 && ty ==0){ m_dSubNorR = sqrt(m_dSubNorR); __syncthreads(); for(int l=0; l<6; l++){ m_iTemp = l; for(int m=l+1; m<6; m++){ if(m_dHessian[m][l] > m_dHessian[m_iTemp][l]){ m_iTemp = m; } } //Swap the row which has maximum lth column element if(m_iTemp != l){ for(int k=0; k<6; k++){ m_dTemp = m_dHessian[l][k]; m_dHessian[l][k] = m_dHessian[m_iTemp][k]; m_dHessian[m_iTemp][k] = m_dTemp; m_dTemp = m_dInvHessian[l][k]; m_dInvHessian[l][k] = m_dInvHessian[m_iTemp][k]; m_dInvHessian[m_iTemp][k] = m_dTemp; } } //Row oerpation to form required identity matrix for(int m=0; m<6; m++){ m_dTemp = m_dHessian[m][l]; if(m != l){ for(int n=0; n<6; n++){ m_dInvHessian[m][n] -= m_dInvHessian[l][n] * m_dTemp / m_dHessian[l][l]; m_dHessian[m][n] -= m_dHessian[l][n] * m_dTemp / m_dHessian[l][l]; } } else{ for(int n=0; n<6; n++){ m_dInvHessian[m][n] /= m_dTemp; m_dHessian[m][n] /= m_dTemp; } } } } } __syncthreads(); //Perform iterative optimization, within the maximum iteration number for(int m_iIteration =0; m_iIteration < m_iMaxiteration; m_iIteration++){ if(tx ==0 && ty==0){ m_dSubAveT=0.0; m_dSubNorT=0.0; m_dNumerator[0] = 0.0; m_dNumerator[1] = 0.0; m_dNumerator[2] = 0.0; m_dNumerator[3] = 0.0; m_dNumerator[4] = 0.0; m_dNumerator[5] = 0.0; } __syncthreads(); m_dWarpX = m_dPXY[1] + m_dWarp[0][0] * (tx - m_iSubsetX) + m_dWarp[0][1] * (ty - m_iSubsetY) + m_dWarp[0][2]; m_dWarpY = m_dPXY[0] + m_dWarp[1][0] * (tx - m_iSubsetX) + m_dWarp[1][1] * (ty - m_iSubsetY) + m_dWarp[1][2]; m_iTempX = int(m_dWarpX); m_iTempY = int(m_dWarpY); if((m_iTempX >=0) && ( m_iTempY >=0) && (m_iTempX<m_iWidth) && (m_iTempY<m_iHeight)){ m_dTempX = m_dWarpX - float(m_iTempX); m_dTempY = m_dWarpY - float(m_iTempY); //if it is integer-pixel location ,feed the intensity of T into subset T if((m_dTempX ==0.0) && (m_dTempY ==0.0)){ m_dSubsetT = input_mdT[m_iTempY * m_iWidth + m_iTempX]; } else{ m_dSubsetT =0.0; for(int k=0; k<4; k++){ for(int n=0; n<4; n++){ m_dSubsetT += input_mBicubic[((m_iTempY*m_iWidth+m_iTempX)*4+k)*4+n] * pow(m_dTempY, k) * pow(m_dTempX,n); } } } __syncthreads(); atomicAdd(&m_dSubAveT, m_dSubsetT/float(m_iSubsetH*m_iSubsetW)); __syncthreads(); m_dSubsetAveT = m_dSubsetT - m_dSubAveT; __syncthreads(); } else{ break; } atomicAdd(&m_dSubNorT, pow(m_dSubsetAveT,2)); __syncthreads(); if(tx==0 && ty==0){ m_dSubNorT = sqrt(m_dSubNorT); } //Compute the error image m_dError = (m_dSubNorR / m_dSubNorT) * m_dSubsetAveT - m_dSubsetAveR; __syncthreads(); if(tx<6 && ty==0){ atomicAdd(&(m_dNumerator[tx]),(m_dRDescent[tx] * m_dError)); } __syncthreads(); //Compute DeltaP if(tx==0 && ty==0){ for(int k=0; k<6; k++){ m_dDP[k] = 0.0; for(int n=0; n<6; n++){ m_dDP[k] += (m_dInvHessian[k][n] * m_dNumerator[n]); } } __syncthreads(); m_dDU = m_dDP[0]; m_dDUx = m_dDP[1]; m_dDUy = m_dDP[2]; m_dDV = m_dDP[3]; m_dDVx = m_dDP[4]; m_dDVy = m_dDP[5]; m_dTemp = (1+m_dDUx) * (1+m_dDVy) - m_dDUy * m_dDVx; //W(P) <- W(P) O W(DP)^-1 m_dWarp[0][0] = ((1 + m_dUx) * (1 + m_dDVy) - m_dUy * m_dDVx) / m_dTemp; m_dWarp[0][1] = (m_dUy * (1 + m_dDUx) - (1 + m_dUx) * m_dDUy) / m_dTemp; 
m_dWarp[0][2] = m_dU + (m_dUy * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - (1 + m_dUx) * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[1][0] = (m_dVx * (1 + m_dDVy) - (1 + m_dVy) * m_dDVx) / m_dTemp; m_dWarp[1][1] = ((1 + m_dVy) * (1 + m_dDUx) - m_dVx * m_dDUy) / m_dTemp; m_dWarp[1][2] = m_dV + ((1 + m_dVy) * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - m_dVx * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; // Update DeltaP m_dP[0] = m_dWarp[0][2]; m_dP[1] = m_dWarp[0][0] - 1; m_dP[2] = m_dWarp[0][1]; m_dP[3] = m_dWarp[1][2]; m_dP[4] = m_dWarp[1][0]; m_dP[5] = m_dWarp[1][1] - 1; m_dU = m_dP[0]; m_dUx = m_dP[1]; m_dUy = m_dP[2]; m_dV = m_dP[3]; m_dVx = m_dP[4]; m_dVy = m_dP[5]; __syncthreads(); } } __syncthreads(); if(threadIdx.x ==0 && threadIdx.y == 0){ output_dP[blockID*6+0] = m_dP[0]; output_dP[blockID*6+1] = m_dP[1]; output_dP[blockID*6+2] = m_dP[2]; output_dP[blockID*6+3] = m_dP[3]; output_dP[blockID*6+4] = m_dP[4]; output_dP[blockID*6+5] = m_dP[5]; dm_iIterationNum[blockID] = 5; } } void launch_ICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, const float& m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, const int& m_iNumberY, const int& m_iNumberX, const int& m_iSubsetH, const int& m_iSubsetW, const int& m_iWidth, const int& m_iHeight, const int& m_iSubsetY, const int& m_iSubsetX, const int& m_iMaxiteration, float* output_dP, int* dm_iIterationNum, float& time) { StopWatchWin icgn; dim3 dimGrid(m_iNumberY, m_iNumberX,1); dim3 dimBlock(m_iSubsetH, m_iSubsetW,1); icgn.start(); hipLaunchKernelGGL(( computeICGN), dim3(dimGrid),dim3(dimBlock), 0, 0, input_dPXY, input_mdR, input_mdRx, input_mdRy, m_dNormDeltaP, input_mdT, input_mBicubic, input_iU, input_iV, m_iNumberY, m_iNumberX, m_iSubsetH, m_iSubsetW, m_iWidth, m_iHeight, m_iSubsetY, m_iSubsetX, m_iMaxiteration, output_dP,dm_iIterationNum); icgn.stop(); time = icgn.getTime(); }
0c64b9fdc9b82b962a8678598651636122518dcc.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "helper_functions.h" #include <stdio.h> #include "IC_GN.cuh" __global__ void computeICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, float m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, int m_iNumberY, int m_iNumberX, int m_iSubsetH, int m_iSubsetW, int m_iWidth, int m_iHeight, int m_iSubsetY, int m_iSubsetX, int m_iMaxiteration, float* output_dP, int* dm_iIterationNum) /*Input: all the const variables Output: deformation P matrix Strategy: Each block compute one of the 21*21 POIs, and within each block 32*32 threads compute other needed computations */ { unsigned int blockID = blockIdx.y * gridDim.x + blockIdx.x; //Row * dim + Col unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; // Shared memory for the shared variables of each block __shared__ float m_dU, m_dV, m_dUx, m_dUy, m_dVx, m_dVy; //Initial guess __shared__ float m_dDU, m_dDV, m_dDUx, m_dDUy, m_dDVx, m_dDVy; __shared__ float m_dSubAveR, m_dSubNorR, m_dSubAveT, m_dSubNorT; //Used for atomicAdd __shared__ float m_dP[6], m_dDP[6], m_dPXY[2], m_dNumerator[6]; __shared__ float m_dWarp[3][3]; __shared__ float m_dHessian[6][6], m_dInvHessian[6][6]; __shared__ float m_dTemp; //__shared__ int m_iIteration; // Local memory for each thread use float m_dSubsetR, m_dSubsetAveR, m_dSubsetT, m_dSubsetAveT; float m_dJacobian[2][6], m_dRDescent[6], m_dHessianXY[6][6]; int m_iTemp, m_iTempX, m_iTempY; float m_dTempX, m_dTempY; float m_dWarpX, m_dWarpY; float m_dError; //Load the shared variables to shared memory if(threadIdx.x ==0 && threadIdx.y==0){ m_dU = float(input_iU[blockID]); m_dDU = 0.0; m_dV = float(input_iV[blockID]); m_dDV = 0.0; m_dUx = 0.0; m_dUx = 0.0; m_dUy = 0.0; m_dUy = 0.0; m_dVx = 0.0; m_dVx = 0.0; m_dVy = 0.0; m_dVy = 0.0; m_dP[0] = m_dU; m_dP[3] = m_dV; m_dP[1] = m_dUx; m_dP[4] = m_dVx; m_dP[2] = m_dUy; m_dP[5] = m_dVy; m_dWarp[0][0] = 1 + m_dUx; m_dWarp[0][1] = m_dUy; m_dWarp[0][2] = m_dU; m_dWarp[1][0] = m_dVx; m_dWarp[1][1] = 1 + m_dVy; m_dWarp[1][2] = m_dV; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; m_dPXY[0] = input_dPXY[blockID*2+0]; m_dPXY[1] = input_dPXY[blockID*2+1]; m_dSubAveR=0.0; m_dSubNorR=0.0; m_dSubAveT=0.0; m_dSubNorT=0.0; dm_iIterationNum[blockID] = 20; } if(tx<6 && ty<6){ if(tx == ty){ m_dInvHessian[ty][tx] = 1.0; m_dHessian[ty][tx] = 0.0; m_dNumerator[tx] = 0.0; m_dDP[tx] = 0.0; } else{ m_dInvHessian[ty][tx] = 0.0; m_dHessian[ty][tx] = 0.0; m_dNumerator[tx] = 0.0; m_dDP[tx] = 0.0; } } __syncthreads(); // Evaluate the Jacbian dW/dp at (x, 0); m_dJacobian[0][0] = 1; m_dJacobian[0][1] = tx - m_iSubsetX; m_dJacobian[0][2] = ty - m_iSubsetY; m_dJacobian[0][3] = 0; m_dJacobian[0][4] = 0; m_dJacobian[0][5] = 0; m_dJacobian[1][0] = 0; m_dJacobian[1][1] = 0; m_dJacobian[1][2] = 0; m_dJacobian[1][3] = 1; m_dJacobian[1][4] = tx - m_iSubsetX; m_dJacobian[1][5] = ty - m_iSubsetY; //Compute the steepest descent image and Hessian matrix for(unsigned int i=0; i<6; i++){ m_dRDescent[i] = input_mdRx[int(m_dPXY[0] - m_iSubsetY+ty)*m_iWidth+int(m_dPXY[1] - m_iSubsetX+tx)] * m_dJacobian[0][i] + input_mdRy[int(m_dPXY[0] - m_iSubsetY+ty)*m_iWidth+int(m_dPXY[1] - m_iSubsetX+tx)] * m_dJacobian[1][i]; } for(unsigned int i=0; i<6; i++){ for(unsigned int j=0; j<6; j++){ m_dHessianXY[i][j] = m_dRDescent[i] * m_dRDescent[j]; m_dHessian[i][j] += m_dHessianXY[i][j]; } } //Fill the intensity values in 
the subset R m_dSubsetR = input_mdR[int(m_dPXY[0] - m_iSubsetY + ty)*m_iWidth+int(m_dPXY[1] - m_iSubsetX + tx)]; __syncthreads(); atomicAdd(&m_dSubAveR, (m_dSubsetR/float(m_iSubsetH*m_iSubsetW))); __syncthreads(); m_dSubsetAveR = m_dSubsetR - m_dSubAveR; __syncthreads(); atomicAdd(&m_dSubNorR, pow(m_dSubsetAveR,2)); __syncthreads(); //Invert the Hessian matrix using the first 36 threads if(tx ==0 && ty ==0){ m_dSubNorR = sqrt(m_dSubNorR); __syncthreads(); for(int l=0; l<6; l++){ m_iTemp = l; for(int m=l+1; m<6; m++){ if(m_dHessian[m][l] > m_dHessian[m_iTemp][l]){ m_iTemp = m; } } //Swap the row which has maximum lth column element if(m_iTemp != l){ for(int k=0; k<6; k++){ m_dTemp = m_dHessian[l][k]; m_dHessian[l][k] = m_dHessian[m_iTemp][k]; m_dHessian[m_iTemp][k] = m_dTemp; m_dTemp = m_dInvHessian[l][k]; m_dInvHessian[l][k] = m_dInvHessian[m_iTemp][k]; m_dInvHessian[m_iTemp][k] = m_dTemp; } } //Row oerpation to form required identity matrix for(int m=0; m<6; m++){ m_dTemp = m_dHessian[m][l]; if(m != l){ for(int n=0; n<6; n++){ m_dInvHessian[m][n] -= m_dInvHessian[l][n] * m_dTemp / m_dHessian[l][l]; m_dHessian[m][n] -= m_dHessian[l][n] * m_dTemp / m_dHessian[l][l]; } } else{ for(int n=0; n<6; n++){ m_dInvHessian[m][n] /= m_dTemp; m_dHessian[m][n] /= m_dTemp; } } } } } __syncthreads(); //Perform iterative optimization, within the maximum iteration number for(int m_iIteration =0; m_iIteration < m_iMaxiteration; m_iIteration++){ if(tx ==0 && ty==0){ m_dSubAveT=0.0; m_dSubNorT=0.0; m_dNumerator[0] = 0.0; m_dNumerator[1] = 0.0; m_dNumerator[2] = 0.0; m_dNumerator[3] = 0.0; m_dNumerator[4] = 0.0; m_dNumerator[5] = 0.0; } __syncthreads(); m_dWarpX = m_dPXY[1] + m_dWarp[0][0] * (tx - m_iSubsetX) + m_dWarp[0][1] * (ty - m_iSubsetY) + m_dWarp[0][2]; m_dWarpY = m_dPXY[0] + m_dWarp[1][0] * (tx - m_iSubsetX) + m_dWarp[1][1] * (ty - m_iSubsetY) + m_dWarp[1][2]; m_iTempX = int(m_dWarpX); m_iTempY = int(m_dWarpY); if((m_iTempX >=0) && ( m_iTempY >=0) && (m_iTempX<m_iWidth) && (m_iTempY<m_iHeight)){ m_dTempX = m_dWarpX - float(m_iTempX); m_dTempY = m_dWarpY - float(m_iTempY); //if it is integer-pixel location ,feed the intensity of T into subset T if((m_dTempX ==0.0) && (m_dTempY ==0.0)){ m_dSubsetT = input_mdT[m_iTempY * m_iWidth + m_iTempX]; } else{ m_dSubsetT =0.0; for(int k=0; k<4; k++){ for(int n=0; n<4; n++){ m_dSubsetT += input_mBicubic[((m_iTempY*m_iWidth+m_iTempX)*4+k)*4+n] * pow(m_dTempY, k) * pow(m_dTempX,n); } } } __syncthreads(); atomicAdd(&m_dSubAveT, m_dSubsetT/float(m_iSubsetH*m_iSubsetW)); __syncthreads(); m_dSubsetAveT = m_dSubsetT - m_dSubAveT; __syncthreads(); } else{ break; } atomicAdd(&m_dSubNorT, pow(m_dSubsetAveT,2)); __syncthreads(); if(tx==0 && ty==0){ m_dSubNorT = sqrt(m_dSubNorT); } //Compute the error image m_dError = (m_dSubNorR / m_dSubNorT) * m_dSubsetAveT - m_dSubsetAveR; __syncthreads(); if(tx<6 && ty==0){ atomicAdd(&(m_dNumerator[tx]),(m_dRDescent[tx] * m_dError)); } __syncthreads(); //Compute DeltaP if(tx==0 && ty==0){ for(int k=0; k<6; k++){ m_dDP[k] = 0.0; for(int n=0; n<6; n++){ m_dDP[k] += (m_dInvHessian[k][n] * m_dNumerator[n]); } } __syncthreads(); m_dDU = m_dDP[0]; m_dDUx = m_dDP[1]; m_dDUy = m_dDP[2]; m_dDV = m_dDP[3]; m_dDVx = m_dDP[4]; m_dDVy = m_dDP[5]; m_dTemp = (1+m_dDUx) * (1+m_dDVy) - m_dDUy * m_dDVx; //W(P) <- W(P) O W(DP)^-1 m_dWarp[0][0] = ((1 + m_dUx) * (1 + m_dDVy) - m_dUy * m_dDVx) / m_dTemp; m_dWarp[0][1] = (m_dUy * (1 + m_dDUx) - (1 + m_dUx) * m_dDUy) / m_dTemp; m_dWarp[0][2] = m_dU + (m_dUy * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - (1 + m_dUx) 
* (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[1][0] = (m_dVx * (1 + m_dDVy) - (1 + m_dVy) * m_dDVx) / m_dTemp; m_dWarp[1][1] = ((1 + m_dVy) * (1 + m_dDUx) - m_dVx * m_dDUy) / m_dTemp; m_dWarp[1][2] = m_dV + ((1 + m_dVy) * (m_dDU * m_dDVx - m_dDV - m_dDV * m_dDUx) - m_dVx * (m_dDU * m_dDVy + m_dDU - m_dDUy * m_dDV)) / m_dTemp; m_dWarp[2][0] = 0; m_dWarp[2][1] = 0; m_dWarp[2][2] = 1; // Update DeltaP m_dP[0] = m_dWarp[0][2]; m_dP[1] = m_dWarp[0][0] - 1; m_dP[2] = m_dWarp[0][1]; m_dP[3] = m_dWarp[1][2]; m_dP[4] = m_dWarp[1][0]; m_dP[5] = m_dWarp[1][1] - 1; m_dU = m_dP[0]; m_dUx = m_dP[1]; m_dUy = m_dP[2]; m_dV = m_dP[3]; m_dVx = m_dP[4]; m_dVy = m_dP[5]; __syncthreads(); } } __syncthreads(); if(threadIdx.x ==0 && threadIdx.y == 0){ output_dP[blockID*6+0] = m_dP[0]; output_dP[blockID*6+1] = m_dP[1]; output_dP[blockID*6+2] = m_dP[2]; output_dP[blockID*6+3] = m_dP[3]; output_dP[blockID*6+4] = m_dP[4]; output_dP[blockID*6+5] = m_dP[5]; dm_iIterationNum[blockID] = 5; } } void launch_ICGN(const float* input_dPXY, const float* input_mdR, const float* input_mdRx, const float* input_mdRy, const float& m_dNormDeltaP, const float* input_mdT, const float* input_mBicubic, const int* input_iU, const int* input_iV, const int& m_iNumberY, const int& m_iNumberX, const int& m_iSubsetH, const int& m_iSubsetW, const int& m_iWidth, const int& m_iHeight, const int& m_iSubsetY, const int& m_iSubsetX, const int& m_iMaxiteration, float* output_dP, int* dm_iIterationNum, float& time) { StopWatchWin icgn; dim3 dimGrid(m_iNumberY, m_iNumberX,1); dim3 dimBlock(m_iSubsetH, m_iSubsetW,1); icgn.start(); computeICGN<<<dimGrid,dimBlock>>>(input_dPXY, input_mdR, input_mdRx, input_mdRy, m_dNormDeltaP, input_mdT, input_mBicubic, input_iU, input_iV, m_iNumberY, m_iNumberX, m_iSubsetH, m_iSubsetW, m_iWidth, m_iHeight, m_iSubsetY, m_iSubsetX, m_iMaxiteration, output_dP,dm_iIterationNum); icgn.stop(); time = icgn.getTime(); }
c69a0b841c7ec4320574b387ce8b0b95ef1bbf6f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda.h>
#include <iostream>
#include "radarprocess.cuh"

namespace chrono{
namespace sensor{

//__global__ void radar_angle_kernel(float* imgIn,
//                                   float* imgOut,
//                                   int w,
//                                   int h,
//                                   float hfov,
//                                   float max_v_angle,
//                                   float min_v_angle){
//    int index = blockDim.x * blockIdx.x + threadIdx.x;
//    if (index < w * h){
//        int hIndex = index % w;
//        int vIndex = index / w;
//
//        float azimuth = (hIndex / (float)(w)) * hfov - hfov / 2.;
//        float elevation = (vIndex / (float)(h)) * (max_v_angle - min_v_angle) + min_v_angle;
//
//        imgOut[8 * index] = imgIn[6 * index];
//        imgOut[8 * index + 1] = azimuth;
//        imgOut[8 * index + 2] = elevation;
//        imgOut[8 * index + 3] = imgIn[6 * index + 2];
//        imgOut[8 * index + 4] = imgIn[6 * index + 3];
//        imgOut[8 * index + 5] = imgIn[6 * index + 4];
//        imgOut[8 * index + 6] = imgIn[6 * index + 1];
//        imgOut[8 * index + 7] = imgIn[6 * index + 5];
//    }
//}

// Converts a depth and intensity buffer to an XZY and intensity buffer
__global__ void radar_pointcloud_from_angles_kernel(float* imgIn,
                                                    float* imgOut,
                                                    int w,
                                                    int h,
                                                    float hfov,
                                                    float vfov) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < w * h) {
        // int hIndex = index % w;
        // int vIndex = index / w;
        //
        // float vAngle = (vIndex / (float)(h)) * (max_v_angle - min_v_angle) + min_v_angle;
        // float hAngle = (hIndex / (float)(w)) * hfov - hfov / 2.;
        float range = imgIn[8 * index];
        float azimuth = imgIn[8 * index + 1];
        float elevation = imgIn[8 * index + 2];
        float proj_xy = range * cosf(elevation);
        float x = proj_xy * cosf(azimuth);
        float y = proj_xy * sinf(azimuth);
        float z = range * sinf(elevation);
        imgOut[8 * index] = x;
        imgOut[8 * index + 1] = y;
        imgOut[8 * index + 2] = z;
        imgOut[8 * index + 3] = imgIn[8 * index + 3];
        imgOut[8 * index + 4] = imgIn[8 * index + 4];
        imgOut[8 * index + 5] = imgIn[8 * index + 5];
        imgOut[8 * index + 6] = imgIn[8 * index + 6];
        imgOut[8 * index + 7] = imgIn[8 * index + 7];
    }
}

void cuda_radar_pointcloud_from_angles(void* bufIn,
                                       void* bufOut,
                                       int width,
                                       int height,
                                       float hfov,
                                       float vfov,
                                       hipStream_t& stream){
    const int nThreads = 512;
    int nBlocks = (width * height + nThreads - 1) / nThreads;

    hipLaunchKernelGGL(( radar_pointcloud_from_angles_kernel), dim3(nBlocks), dim3(nThreads), 0, stream,
        (float*)bufIn, (float*)bufOut, width, height, hfov, vfov);
}

//void cuda_radar_pointcloud(void* bufIn,
//                           void* bufOut,
//                           )

}
}
c69a0b841c7ec4320574b387ce8b0b95ef1bbf6f.cu
#include <cuda.h>
#include <iostream>
#include "radarprocess.cuh"

namespace chrono{
namespace sensor{

//__global__ void radar_angle_kernel(float* imgIn,
//                                   float* imgOut,
//                                   int w,
//                                   int h,
//                                   float hfov,
//                                   float max_v_angle,
//                                   float min_v_angle){
//    int index = blockDim.x * blockIdx.x + threadIdx.x;
//    if (index < w * h){
//        int hIndex = index % w;
//        int vIndex = index / w;
//
//        float azimuth = (hIndex / (float)(w)) * hfov - hfov / 2.;
//        float elevation = (vIndex / (float)(h)) * (max_v_angle - min_v_angle) + min_v_angle;
//
//        imgOut[8 * index] = imgIn[6 * index];
//        imgOut[8 * index + 1] = azimuth;
//        imgOut[8 * index + 2] = elevation;
//        imgOut[8 * index + 3] = imgIn[6 * index + 2];
//        imgOut[8 * index + 4] = imgIn[6 * index + 3];
//        imgOut[8 * index + 5] = imgIn[6 * index + 4];
//        imgOut[8 * index + 6] = imgIn[6 * index + 1];
//        imgOut[8 * index + 7] = imgIn[6 * index + 5];
//    }
//}

// Converts a depth and intensity buffer to an XZY and intensity buffer
__global__ void radar_pointcloud_from_angles_kernel(float* imgIn,
                                                    float* imgOut,
                                                    int w,
                                                    int h,
                                                    float hfov,
                                                    float vfov) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < w * h) {
        // int hIndex = index % w;
        // int vIndex = index / w;
        //
        // float vAngle = (vIndex / (float)(h)) * (max_v_angle - min_v_angle) + min_v_angle;
        // float hAngle = (hIndex / (float)(w)) * hfov - hfov / 2.;
        float range = imgIn[8 * index];
        float azimuth = imgIn[8 * index + 1];
        float elevation = imgIn[8 * index + 2];
        float proj_xy = range * cosf(elevation);
        float x = proj_xy * cosf(azimuth);
        float y = proj_xy * sinf(azimuth);
        float z = range * sinf(elevation);
        imgOut[8 * index] = x;
        imgOut[8 * index + 1] = y;
        imgOut[8 * index + 2] = z;
        imgOut[8 * index + 3] = imgIn[8 * index + 3];
        imgOut[8 * index + 4] = imgIn[8 * index + 4];
        imgOut[8 * index + 5] = imgIn[8 * index + 5];
        imgOut[8 * index + 6] = imgIn[8 * index + 6];
        imgOut[8 * index + 7] = imgIn[8 * index + 7];
    }
}

void cuda_radar_pointcloud_from_angles(void* bufIn,
                                       void* bufOut,
                                       int width,
                                       int height,
                                       float hfov,
                                       float vfov,
                                       CUstream& stream){
    const int nThreads = 512;
    int nBlocks = (width * height + nThreads - 1) / nThreads;

    radar_pointcloud_from_angles_kernel<<<nBlocks, nThreads, 0, stream>>>(
        (float*)bufIn, (float*)bufOut, width, height, hfov, vfov);
}

//void cuda_radar_pointcloud(void* bufIn,
//                           void* bufOut,
//                           )

}
}
76d78420b4f90585cb47478a47f631864b36eda3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DATA_SIZE 200000000
#include"cuda_runtime.h"
#include"cuda.h"
#include"hip/hip_runtime_api.h"
#include<stdio.h>
#include <stdlib.h>
using namespace std;

int data[DATA_SIZE];

void GenerateNumber(int *number,int size)
{
	printf("number: ");
	for(int i=0 ; i<size ; i++)
	{
		number[i] = rand()%10;
		//printf("%d ",number[i]);
	}
}

__device__ void qq()
{
	extern __shared__ int shared[][1];
	const int tid = threadIdx.x;
	if(tid==0){
		for(int i=1;i<blockDim.x;i++)
			shared[0][0] += shared[i][0];
	}
}

__global__ static void sumOfSquares(int *num,int * result)
{
	extern __shared__ int shared[][1];
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	int sum = 0;
	int i;
	for(i=bid*blockDim.x+tid ; i<DATA_SIZE ; i+= blockDim.x * gridDim.x)
		sum += num[i]*num[i];
	shared[tid][0] = sum;
	__syncthreads();
	qq();
	result[bid] = shared[0][0];
}

int main()
{
	GenerateNumber(data,DATA_SIZE);
	int sum[100] = {0};

	clock_t begin_time = clock();
	for(int i=0;i<DATA_SIZE;i++)
		sum[0] += data[i] * data[i];
	printf("time: %f\n",float( clock () - begin_time ) / CLOCKS_PER_SEC);
	printf("sum:%d\n",sum[0]);

	int *gpudata, *result;
	hipMalloc((void**) &gpudata,sizeof(int)*DATA_SIZE);
	hipMalloc((void**) &result,sizeof(int)*100);
	hipMemcpy(gpudata,data,sizeof(int)*DATA_SIZE,hipMemcpyHostToDevice);

	begin_time = clock();
	hipLaunchKernelGGL(( sumOfSquares), dim3(10),dim3(10),0, 0, gpudata,result);
	hipMemcpy(&sum,result,sizeof(int)*1*1,hipMemcpyDeviceToHost);
	printf("sum:%d\n",sum[0]);
	printf("---0time: %f\n",float( clock () - begin_time ) / CLOCKS_PER_SEC);

	hipFree(gpudata);
	hipFree(result);
	return 0;
}
76d78420b4f90585cb47478a47f631864b36eda3.cu
#define DATA_SIZE 200000000
#include"cuda_runtime.h"
#include"cuda.h"
#include"cuda_runtime_api.h"
#include<stdio.h>
#include <stdlib.h>
using namespace std;

int data[DATA_SIZE];

void GenerateNumber(int *number,int size)
{
	printf("number: ");
	for(int i=0 ; i<size ; i++)
	{
		number[i] = rand()%10;
		//printf("%d ",number[i]);
	}
}

__device__ void qq()
{
	extern __shared__ int shared[][1];
	const int tid = threadIdx.x;
	if(tid==0){
		for(int i=1;i<blockDim.x;i++)
			shared[0][0] += shared[i][0];
	}
}

__global__ static void sumOfSquares(int *num,int * result)
{
	extern __shared__ int shared[][1];
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	int sum = 0;
	int i;
	for(i=bid*blockDim.x+tid ; i<DATA_SIZE ; i+= blockDim.x * gridDim.x)
		sum += num[i]*num[i];
	shared[tid][0] = sum;
	__syncthreads();
	qq();
	result[bid] = shared[0][0];
}

int main()
{
	GenerateNumber(data,DATA_SIZE);
	int sum[100] = {0};

	clock_t begin_time = clock();
	for(int i=0;i<DATA_SIZE;i++)
		sum[0] += data[i] * data[i];
	printf("time: %f\n",float( clock () - begin_time ) / CLOCKS_PER_SEC);
	printf("sum:%d\n",sum[0]);

	int *gpudata, *result;
	cudaMalloc((void**) &gpudata,sizeof(int)*DATA_SIZE);
	cudaMalloc((void**) &result,sizeof(int)*100);
	cudaMemcpy(gpudata,data,sizeof(int)*DATA_SIZE,cudaMemcpyHostToDevice);

	begin_time = clock();
	sumOfSquares<<<10,10,0>>>(gpudata,result);
	cudaMemcpy(&sum,result,sizeof(int)*1*1,cudaMemcpyDeviceToHost);
	printf("sum:%d\n",sum[0]);
	printf("---0time: %f\n",float( clock () - begin_time ) / CLOCKS_PER_SEC);

	cudaFree(gpudata);
	cudaFree(result);
	return 0;
}
cde06f55e5d9f2538db7bdeb898143cc21926751.hip
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>

namespace {
template <typename scalar_t>
__global__ void relToAbsIndex3d_cuda_forward_kernel(
        const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> relIndx,
        const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> init_spIndx,
        torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> absIndx,
        int length, int height, int width, int Kl, int Kh, int Kw, int K) {
    // indexing
    const int n = blockIdx.y;
    int d = blockIdx.x * blockDim.x + threadIdx.x;
    const int HW = height * width;
    const int l = d / HW;
    d = d % HW;
    const int h = d / width;
    const int w = d % width;
    if (l < length) {
        // Convert spix_idx based on the rel_idx
        const int rel_idx = static_cast<int>(relIndx[n][0][l][h][w]);
        const int rel_idx_l = rel_idx / 9 - 1;
        int rel_idx_h = (rel_idx % 9) / 3 - 1;
        int rel_idx_w = (rel_idx % 9) % 3 - 1;
        const int init_spix_idx = static_cast<int>(init_spIndx[n][0][l][h][w]);
        const int Khw = Kh * Kw;

        int spix_idx_l = init_spix_idx + rel_idx_l * Khw;
        if (spix_idx_l >= K || spix_idx_l <= -1) {
            spix_idx_l = init_spix_idx;
        }

        if (((spix_idx_l + Kw) % Khw) == 0 && rel_idx_h == 1) {
            rel_idx_h = 0;
        } else if ((spix_idx_l % Khw) == 0 && rel_idx_h == -1) {
            rel_idx_h = 0;
        }
        int spix_idx_h = spix_idx_l + rel_idx_h * Kw;
        if (spix_idx_h >= K || spix_idx_h <= -1) {
            spix_idx_h = spix_idx_l;
        }

        if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) {
            rel_idx_w = 0;
        } else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) {
            rel_idx_w = 0;
        }
        int spix_idx_w = spix_idx_h + rel_idx_w;
        if (spix_idx_w < K && spix_idx_w > -1) {
            absIndx[n][0][l][h][w] = static_cast<float>(spix_idx_w);
        } else {
            absIndx[n][0][l][h][w] = static_cast<float>(spix_idx_h);
        }
    }
}
} // namespace

torch::Tensor relToAbsIndex3d_cuda_forward(
        const torch::Tensor relIndx,      // B 1 L H W
        const torch::Tensor init_spIndx,  // B 1 L H W
        const int Kl, const int Kh, const int Kw) {
    // setup
    const auto batch_size = relIndx.size(0);
    const auto length = relIndx.size(2);
    const auto height = relIndx.size(3);
    const auto width = relIndx.size(4);
    auto absIndx = torch::zeros_like(relIndx);  // B 1 H W
    // launch kernel
    const int threads = 1024;
    const dim3 blocks((length * height * width + threads - 1) / threads, batch_size);
    AT_DISPATCH_FLOATING_TYPES(relIndx.type(), "relToAbsIndex3d_forward_cuda", ([&] {
        hipLaunchKernelGGL(( relToAbsIndex3d_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
            relIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
            init_spIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
            absIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
            length, height, width, Kl, Kh, Kw, Kl*Kh*Kw);
    }));
    return absIndx;
}
cde06f55e5d9f2538db7bdeb898143cc21926751.cu
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>

namespace {
template <typename scalar_t>
__global__ void relToAbsIndex3d_cuda_forward_kernel(
        const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> relIndx,
        const torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> init_spIndx,
        torch::PackedTensorAccessor<scalar_t,5,torch::RestrictPtrTraits,size_t> absIndx,
        int length, int height, int width, int Kl, int Kh, int Kw, int K) {
    // indexing
    const int n = blockIdx.y;
    int d = blockIdx.x * blockDim.x + threadIdx.x;
    const int HW = height * width;
    const int l = d / HW;
    d = d % HW;
    const int h = d / width;
    const int w = d % width;
    if (l < length) {
        // Convert spix_idx based on the rel_idx
        const int rel_idx = static_cast<int>(relIndx[n][0][l][h][w]);
        const int rel_idx_l = rel_idx / 9 - 1;
        int rel_idx_h = (rel_idx % 9) / 3 - 1;
        int rel_idx_w = (rel_idx % 9) % 3 - 1;
        const int init_spix_idx = static_cast<int>(init_spIndx[n][0][l][h][w]);
        const int Khw = Kh * Kw;

        int spix_idx_l = init_spix_idx + rel_idx_l * Khw;
        if (spix_idx_l >= K || spix_idx_l <= -1) {
            spix_idx_l = init_spix_idx;
        }

        if (((spix_idx_l + Kw) % Khw) == 0 && rel_idx_h == 1) {
            rel_idx_h = 0;
        } else if ((spix_idx_l % Khw) == 0 && rel_idx_h == -1) {
            rel_idx_h = 0;
        }
        int spix_idx_h = spix_idx_l + rel_idx_h * Kw;
        if (spix_idx_h >= K || spix_idx_h <= -1) {
            spix_idx_h = spix_idx_l;
        }

        if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) {
            rel_idx_w = 0;
        } else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) {
            rel_idx_w = 0;
        }
        int spix_idx_w = spix_idx_h + rel_idx_w;
        if (spix_idx_w < K && spix_idx_w > -1) {
            absIndx[n][0][l][h][w] = static_cast<float>(spix_idx_w);
        } else {
            absIndx[n][0][l][h][w] = static_cast<float>(spix_idx_h);
        }
    }
}
} // namespace

torch::Tensor relToAbsIndex3d_cuda_forward(
        const torch::Tensor relIndx,      // B 1 L H W
        const torch::Tensor init_spIndx,  // B 1 L H W
        const int Kl, const int Kh, const int Kw) {
    // setup
    const auto batch_size = relIndx.size(0);
    const auto length = relIndx.size(2);
    const auto height = relIndx.size(3);
    const auto width = relIndx.size(4);
    auto absIndx = torch::zeros_like(relIndx);  // B 1 H W
    // launch kernel
    const int threads = 1024;
    const dim3 blocks((length * height * width + threads - 1) / threads, batch_size);
    AT_DISPATCH_FLOATING_TYPES(relIndx.type(), "relToAbsIndex3d_forward_cuda", ([&] {
        relToAbsIndex3d_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
            relIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
            init_spIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
            absIndx.packed_accessor<scalar_t,5,torch::RestrictPtrTraits,size_t>(),
            length, height, width, Kl, Kh, Kw, Kl*Kh*Kw);
    }));
    return absIndx;
}
765f804bb71b358bd56c509115eaf6bfa603a6ec.hip
// !!! This is a file automatically generated by hipify!!! //-------------------------------------------------------------------------------- // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met : // // *Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright(c) 2019, Sergen Eren // All rights reserved. //---------------------------------------------------------------------------------- // // Version 1.0: Sergen Eren, 29/11/2019 // // File: Contains the kernels for construction of volume bvh on gpu // from https://github.com/henrikdahlberg/GPUPathTracer // //----------------------------------------------- #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <device_launch_parameters.h> #include <thrust/random.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/functional.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <helper_math.h> #include "bvh.h" #include "gpu_vdb.h" #define BLOCK_SIZE 32 #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall(hipError_t err, const char *file, const int line) { #ifdef CUDA_ERROR_CHECK if (hipSuccess != err) { fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } #endif return; } inline void __cudaCheckError(const char *file, const int line) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } // More careful checking. However, this will affect performance. // Comment away if needed. 
err = hipDeviceSynchronize(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } #endif return; } ////////////////////////////////////////////////////////////////////////// // Device functions ////////////////////////////////////////////////////////////////////////// /** * Longest common prefix for Morton code */ __device__ int LongestCommonPrefix(int i, int j, int numTriangles, MortonCode* mortonCodes, int* triangleIDs) { if (i < 0 || i > numTriangles - 1 || j < 0 || j > numTriangles - 1) { return -1; } MortonCode mi = mortonCodes[i]; MortonCode mj = mortonCodes[j]; if (mi == mj) { return __clzll(mi ^ mj) + __clzll(triangleIDs[i] ^ triangleIDs[j]); } else { return __clzll(mi ^ mj); } } /** * Expand bits, used in Morton code calculation */ __device__ MortonCode bitExpansion(MortonCode i) { i = (i * 0x00010001u) & 0xFF0000FFu; i = (i * 0x00000101u) & 0x0F00F00Fu; i = (i * 0x00000011u) & 0xC30C30C3u; i = (i * 0x00000005u) & 0x49249249u; return i; } /** * Compute morton code given volume centroid scaled to [0,1] of scene bounding box */ __device__ MortonCode ComputeMortonCode(float x, float y, float z) { x = min(max(x * 1024.0f, 0.0f), 1023.0f); y = min(max(y * 1024.0f, 0.0f), 1023.0f); z = min(max(z * 1024.0f, 0.0f), 1023.0f); MortonCode xx = bitExpansion((MortonCode)x); MortonCode yy = bitExpansion((MortonCode)y); MortonCode zz = bitExpansion((MortonCode)z); return xx * 4 + yy * 2 + zz; } __device__ AABB divide_bbox(int idx, float3 pmin, float3 pmax) { float3 min = make_float3(.0f); float3 max = make_float3(.0f); float half_x = (pmin.x + pmax.x)*0.5; float half_y = (pmin.y + pmax.y)*0.5; float half_z = (pmin.z + pmax.z)*0.5; if (idx == 0) { min = make_float3(pmin.x, half_y, pmin.z); max = make_float3(half_x, pmax.y, half_z); } if (idx == 1) { min = make_float3(half_x, half_y, pmin.z); max = make_float3(pmax.x, pmax.y, half_z); } if (idx == 2) { min = pmin; max = make_float3(half_x, half_y, half_z); } if (idx == 3) { min = make_float3(half_x, pmin.y, pmin.z); max = make_float3(pmax.x, half_y, half_z); } if (idx == 4) { min = make_float3(pmin.x, half_y, half_z); max = make_float3(half_x, pmax.y, pmax.z); } if (idx == 5) { min = make_float3(half_x, half_y, half_z); max = pmax; } if (idx == 6) { min = make_float3(pmin.x, pmin.y, half_z); max = make_float3(half_x, half_y, pmax.z); } if (idx == 7) { min = make_float3(half_x, pmin.y, half_z); max = make_float3(pmax.x, half_y, pmax.z); } return AABB(min, max); } __device__ void build_octree_recursive(GPU_VDB *vdbs, int num_volumes, OCTNode *root, int depth, bool m_debug) { if (depth > 0) { if (root->num_volumes > 0) { for (int i = 0; i < 8; ++i) { root->children[i] = new OCTNode; root->children[i]->parent = root; root->children[i]->depth = depth; float3 pmin = root->bbox.pmin; float3 pmax = root->bbox.pmax; root->children[i]->bbox = divide_bbox(i, pmin, pmax); int idx = 0; for (int y = 0; y < num_volumes; ++y) { if (Overlaps(root->children[i]->bbox, vdbs[y].Bounds())) { root->children[i]->num_volumes++; root->children[i]->vol_indices[idx] = y; root->children[i]->max_extinction = fmaxf(root->children[i]->max_extinction, vdbs[y].vdb_info.max_density); root->children[i]->min_extinction = fminf(root->children[i]->min_extinction, vdbs[y].vdb_info.min_density); root->children[i]->voxel_size = fminf(root->children[i]->voxel_size, vdbs[y].vdb_info.voxelsize); idx++; } } if (root->children[i]->num_volumes>0) root->children[i]->has_children = true; if (m_debug) { 
printf("num volumes for child %d-%d is %d ", depth, i, root->children[i]->num_volumes); if (root->children[i]->num_volumes > 0) { printf("volume indices: "); for (int x = 0; x < root->children[i]->num_volumes; ++x) { printf("%d ", root->children[i]->vol_indices[x]); } } printf(" max extinction: %f\n", root->children[i]->max_extinction); printf(" min extinction: %f\n", root->children[i]->min_extinction); } build_octree_recursive(vdbs, num_volumes, root->children[i], depth - 1, m_debug); } } } } ////////////////////////////////////////////////////////////////////////// // Kernels ////////////////////////////////////////////////////////////////////////// __global__ void ComputeBoundingBoxes(GPU_VDB* volumes, int numVolumes, AABB* boundingBoxes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numVolumes) boundingBoxes[i] = volumes[i].Bounds(); } __global__ void DebugBVH(BVHNode* BVHLeaves, BVHNode* BVHNodes, int numVolumes) { int i = blockIdx.x * blockDim.x + threadIdx.x; // do in serial if (i == 0) { for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = BVHLeaves + j; printf("BBox for volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } //parents: for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = (BVHLeaves + j)->parent; printf("BBox for parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = (BVHLeaves + j)->parent->parent; printf("BBox for parents parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = (BVHLeaves + j)->parent->parent->parent; printf("BBox for parents parents parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } } } __global__ void ComputeMortonCodes(const GPU_VDB* volumes, int numTriangles, AABB sceneBounds, MortonCode* mortonCodes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numTriangles) { // Compute volume centroid float3 centroid = volumes[i].Bounds().Centroid(); // Normalize triangle centroid to lie within [0,1] of scene bounding box float x = (centroid.x - sceneBounds.pmin.x) / (sceneBounds.pmax.x - sceneBounds.pmin.x); float y = (centroid.y - sceneBounds.pmin.y) / (sceneBounds.pmax.y - sceneBounds.pmin.y); float z = (centroid.z - sceneBounds.pmin.z) / (sceneBounds.pmax.z - sceneBounds.pmin.z); // Compute morton code mortonCodes[i] = ComputeMortonCode(x, y, z); } } __global__ void ConstructBVH(BVHNode* BVHNodes, BVHNode* BVHLeaves, int* nodeCounter, GPU_VDB* volumes, int* volumeIDs, int numVolumes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numVolumes) { 
BVHNode* leaf = BVHLeaves + i; int volumeIdx = volumeIDs[i]; // Handle leaf first leaf->volIndex = volumeIdx; leaf->boundingBox = volumes[volumeIdx].Bounds(); BVHNode* current = leaf->parent; int currentIndex = current - BVHNodes; int res = atomicAdd(nodeCounter + currentIndex, 1); // Go up and handle internal nodes while (true) { if (res == 0) { return; } AABB leftBoundingBox = current->leftChild->boundingBox; AABB rightBoundingBox = current->rightChild->boundingBox; // Compute current bounding box current->boundingBox = UnionB(leftBoundingBox, rightBoundingBox); // If current is root, return if (current == BVHNodes) { return; } current = current->parent; currentIndex = current - BVHNodes; res = atomicAdd(nodeCounter + currentIndex, 1); } } } __global__ void BuildRadixTree(BVHNode* radixTreeNodes, BVHNode* radixTreeLeaves, MortonCode* mortonCodes, int* volumeIds, int numVolumes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numVolumes - 1) { // Run radix tree construction algorithm // Determine direction of the range (+1 or -1) int d = LongestCommonPrefix(i, i + 1, numVolumes, mortonCodes, volumeIds) - LongestCommonPrefix(i, i - 1, numVolumes, mortonCodes, volumeIds) >= 0 ? 1 : -1; // Compute upper bound for the length of the range int deltaMin = LongestCommonPrefix(i, i - d, numVolumes, mortonCodes, volumeIds); //int lmax = 128; int lmax = 2; while (LongestCommonPrefix(i, i + lmax * d, numVolumes, mortonCodes, volumeIds) > deltaMin) { //lmax = lmax * 4; lmax = lmax * 2; } // Find the other end using binary search int l = 0; int divider = 2; for (int t = lmax / divider; t >= 1; divider *= 2) { if (LongestCommonPrefix(i, i + (l + t) * d, numVolumes, mortonCodes, volumeIds) > deltaMin) { l = l + t; } if (t == 1) break; t = lmax / divider; } int j = i + l * d; // Find the split position using binary search int deltaNode = LongestCommonPrefix(i, j, numVolumes, mortonCodes, volumeIds); int s = 0; divider = 2; for (int t = (l + (divider - 1)) / divider; t >= 1; divider *= 2) { if (LongestCommonPrefix(i, i + (s + t) * d, numVolumes, mortonCodes, volumeIds) > deltaNode) { s = s + t; } if (t == 1) break; t = (l + (divider - 1)) / divider; } int gamma = i + s * d + min(d, 0); //printf("i:%d, d:%d, deltaMin:%d, deltaNode:%d, lmax:%d, l:%d, j:%d, gamma:%d. 
\n", i, d, deltaMin, deltaNode, lmax, l, j, gamma); // Output child pointers BVHNode* current = radixTreeNodes + i; if (min(i, j) == gamma) { current->leftChild = radixTreeLeaves + gamma; (radixTreeLeaves + gamma)->parent = current; } else { current->leftChild = radixTreeNodes + gamma; (radixTreeNodes + gamma)->parent = current; } if (max(i, j) == gamma + 1) { current->rightChild = radixTreeLeaves + gamma + 1; (radixTreeLeaves + gamma + 1)->parent = current; } else { current->rightChild = radixTreeNodes + gamma + 1; (radixTreeNodes + gamma + 1)->parent = current; } current->minId = min(i, j); current->maxId = max(i, j); } } __global__ void pass_octree(GPU_VDB *volumes, int num_volumes, OCTNode *root, int depth, bool m_debug) { build_octree_recursive(volumes, num_volumes, root, depth, m_debug); } extern "C" void BuildBVH(BVH& bvh, GPU_VDB* volumes, int numVolumes, AABB &sceneBounds, bool debug_bvh) { int blockSize = BLOCK_SIZE; int gridSize = (numVolumes + blockSize - 1) / blockSize; // Timing metrics float total = 0; float elapsed; hipEvent_t start, stop; if (debug_bvh) printf("Number of volumes: %i\n", numVolumes); hipEventCreate(&start); hipEventCreate(&stop); // Pre-process stage, scene bounding box // TODO: add check if this has been done already // if we already have scenebounds and have new/modified triangles, no need to start over // Should only do this if scene has changed (added tris, moved tris) // Compute bounding boxes if (debug_bvh) printf("Computing volume bounding boxes..."); hipEventRecord(start, 0); thrust::device_vector<AABB> boundingBoxes(numVolumes); hipLaunchKernelGGL(( ComputeBoundingBoxes) , dim3(gridSize), dim3(blockSize), 0, 0, volumes, numVolumes, boundingBoxes.data().get()); CudaCheckError(); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; thrust::host_vector<AABB> bounding_boxes_h = boundingBoxes; // Compute scene bounding box if (debug_bvh) printf("Computing scene bounding box..."); hipEventRecord(start, 0); sceneBounds = thrust::reduce(boundingBoxes.begin(), boundingBoxes.end(), AABB(), AABBUnion()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; if (debug_bvh) printf("Total pre-computation time for scene was %f ms. \n", total); total = 0; if (debug_bvh) { printf("Scene boundingbox:\n"); printf("pmin: %f, %f, %f\n", sceneBounds.pmin.x, sceneBounds.pmin.y, sceneBounds.pmin.z); printf("pmax: %f, %f, %f\n", sceneBounds.pmax.x, sceneBounds.pmax.y, sceneBounds.pmax.z); } // Pre-process done, start building BVH // Compute Morton codes thrust::device_vector<MortonCode> mortonCodes(numVolumes); if (debug_bvh) printf("Computing Morton codes..."); hipEventRecord(start, 0); hipLaunchKernelGGL(( ComputeMortonCodes) , dim3(gridSize), dim3(blockSize), 0, 0, volumes, numVolumes, sceneBounds, mortonCodes.data().get()); CudaCheckError(); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. 
\n", elapsed); total += elapsed; // Sort triangle indices with Morton code as key thrust::device_vector<int> triangleIDs(numVolumes); thrust::sequence(triangleIDs.begin(), triangleIDs.end()); if (debug_bvh) printf("Sort volumes..."); hipEventRecord(start, 0); try { thrust::sort_by_key(mortonCodes.begin(), mortonCodes.end(), triangleIDs.begin()); } catch (thrust::system_error e) { printf("Error inside sort: %s\n", e.what()); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Sorting took %f ms. \n", elapsed); total += elapsed; // Build radix tree of BVH nodes checkCudaErrors(hipMalloc((void**)&bvh.BVHNodes, (numVolumes - 1) * sizeof(BVHNode))); checkCudaErrors(hipMalloc((void**)&bvh.BVHLeaves, numVolumes * sizeof(BVHNode))); if (debug_bvh) printf("Building radix tree..."); hipEventRecord(start, 0); hipLaunchKernelGGL(( BuildRadixTree) , dim3(gridSize), dim3(blockSize), 0, 0, bvh.BVHNodes, bvh.BVHLeaves, mortonCodes.data().get(), triangleIDs.data().get(), numVolumes); CudaCheckError(); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; // Build BVH thrust::device_vector<int> nodeCounters(numVolumes); if (debug_bvh) printf("Building BVH..."); hipEventRecord(start, 0); hipLaunchKernelGGL(( ConstructBVH) , dim3(gridSize), dim3(blockSize) , 0, 0, bvh.BVHNodes, bvh.BVHLeaves, nodeCounters.data().get(), volumes, triangleIDs.data().get(), numVolumes); CudaCheckError(); checkCudaErrors(hipDeviceSynchronize()); //DebugBVH << <gridSize, blockSize >> >(bvh.BVHLeaves, bvh.BVHNodes, numVolumes); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; if (debug_bvh) printf("Total BVH construction time was %f ms. \n", total); hipEventDestroy(start); hipEventDestroy(stop); } extern "C" void build_octree(OCTNode *root, GPU_VDB *volumes, int num_volumes, int depth, bool m_debug) { float elapsed; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); if (m_debug) printf("Building Octree..."); hipEventRecord(start, 0); pass_octree << <1, 1 >> > (volumes, num_volumes, root, depth, false); CudaCheckError(); checkCudaErrors(hipGetLastError()); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); checkCudaErrors(hipDeviceSynchronize()); if(m_debug) printf("done! Computation took %f ms. \n", elapsed); hipEventDestroy(start); hipEventDestroy(stop); }
765f804bb71b358bd56c509115eaf6bfa603a6ec.cu
//-------------------------------------------------------------------------------- // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met : // // *Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright(c) 2019, Sergen Eren // All rights reserved. //---------------------------------------------------------------------------------- // // Version 1.0: Sergen Eren, 29/11/2019 // // File: Contains the kernels for construction of volume bvh on gpu // from https://github.com/henrikdahlberg/GPUPathTracer // //----------------------------------------------- #include <cuda.h> #include <cuda_runtime.h> #include <device_functions.h> #include <device_launch_parameters.h> #include <thrust/random.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/functional.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <helper_math.h> #include "bvh.h" #include "gpu_vdb.h" #define BLOCK_SIZE 32 #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall(cudaError err, const char *file, const int line) { #ifdef CUDA_ERROR_CHECK if (cudaSuccess != err) { fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } #endif return; } inline void __cudaCheckError(const char *file, const int line) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } // More careful checking. However, this will affect performance. // Comment away if needed. 
err = cudaDeviceSynchronize(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } #endif return; } ////////////////////////////////////////////////////////////////////////// // Device functions ////////////////////////////////////////////////////////////////////////// /** * Longest common prefix for Morton code */ __device__ int LongestCommonPrefix(int i, int j, int numTriangles, MortonCode* mortonCodes, int* triangleIDs) { if (i < 0 || i > numTriangles - 1 || j < 0 || j > numTriangles - 1) { return -1; } MortonCode mi = mortonCodes[i]; MortonCode mj = mortonCodes[j]; if (mi == mj) { return __clzll(mi ^ mj) + __clzll(triangleIDs[i] ^ triangleIDs[j]); } else { return __clzll(mi ^ mj); } } /** * Expand bits, used in Morton code calculation */ __device__ MortonCode bitExpansion(MortonCode i) { i = (i * 0x00010001u) & 0xFF0000FFu; i = (i * 0x00000101u) & 0x0F00F00Fu; i = (i * 0x00000011u) & 0xC30C30C3u; i = (i * 0x00000005u) & 0x49249249u; return i; } /** * Compute morton code given volume centroid scaled to [0,1] of scene bounding box */ __device__ MortonCode ComputeMortonCode(float x, float y, float z) { x = min(max(x * 1024.0f, 0.0f), 1023.0f); y = min(max(y * 1024.0f, 0.0f), 1023.0f); z = min(max(z * 1024.0f, 0.0f), 1023.0f); MortonCode xx = bitExpansion((MortonCode)x); MortonCode yy = bitExpansion((MortonCode)y); MortonCode zz = bitExpansion((MortonCode)z); return xx * 4 + yy * 2 + zz; } __device__ AABB divide_bbox(int idx, float3 pmin, float3 pmax) { float3 min = make_float3(.0f); float3 max = make_float3(.0f); float half_x = (pmin.x + pmax.x)*0.5; float half_y = (pmin.y + pmax.y)*0.5; float half_z = (pmin.z + pmax.z)*0.5; if (idx == 0) { min = make_float3(pmin.x, half_y, pmin.z); max = make_float3(half_x, pmax.y, half_z); } if (idx == 1) { min = make_float3(half_x, half_y, pmin.z); max = make_float3(pmax.x, pmax.y, half_z); } if (idx == 2) { min = pmin; max = make_float3(half_x, half_y, half_z); } if (idx == 3) { min = make_float3(half_x, pmin.y, pmin.z); max = make_float3(pmax.x, half_y, half_z); } if (idx == 4) { min = make_float3(pmin.x, half_y, half_z); max = make_float3(half_x, pmax.y, pmax.z); } if (idx == 5) { min = make_float3(half_x, half_y, half_z); max = pmax; } if (idx == 6) { min = make_float3(pmin.x, pmin.y, half_z); max = make_float3(half_x, half_y, pmax.z); } if (idx == 7) { min = make_float3(half_x, pmin.y, half_z); max = make_float3(pmax.x, half_y, pmax.z); } return AABB(min, max); } __device__ void build_octree_recursive(GPU_VDB *vdbs, int num_volumes, OCTNode *root, int depth, bool m_debug) { if (depth > 0) { if (root->num_volumes > 0) { for (int i = 0; i < 8; ++i) { root->children[i] = new OCTNode; root->children[i]->parent = root; root->children[i]->depth = depth; float3 pmin = root->bbox.pmin; float3 pmax = root->bbox.pmax; root->children[i]->bbox = divide_bbox(i, pmin, pmax); int idx = 0; for (int y = 0; y < num_volumes; ++y) { if (Overlaps(root->children[i]->bbox, vdbs[y].Bounds())) { root->children[i]->num_volumes++; root->children[i]->vol_indices[idx] = y; root->children[i]->max_extinction = fmaxf(root->children[i]->max_extinction, vdbs[y].vdb_info.max_density); root->children[i]->min_extinction = fminf(root->children[i]->min_extinction, vdbs[y].vdb_info.min_density); root->children[i]->voxel_size = fminf(root->children[i]->voxel_size, vdbs[y].vdb_info.voxelsize); idx++; } } if (root->children[i]->num_volumes>0) root->children[i]->has_children = true; if (m_debug) { 
printf("num volumes for child %d-%d is %d ", depth, i, root->children[i]->num_volumes); if (root->children[i]->num_volumes > 0) { printf("volume indices: "); for (int x = 0; x < root->children[i]->num_volumes; ++x) { printf("%d ", root->children[i]->vol_indices[x]); } } printf(" max extinction: %f\n", root->children[i]->max_extinction); printf(" min extinction: %f\n", root->children[i]->min_extinction); } build_octree_recursive(vdbs, num_volumes, root->children[i], depth - 1, m_debug); } } } } ////////////////////////////////////////////////////////////////////////// // Kernels ////////////////////////////////////////////////////////////////////////// __global__ void ComputeBoundingBoxes(GPU_VDB* volumes, int numVolumes, AABB* boundingBoxes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numVolumes) boundingBoxes[i] = volumes[i].Bounds(); } __global__ void DebugBVH(BVHNode* BVHLeaves, BVHNode* BVHNodes, int numVolumes) { int i = blockIdx.x * blockDim.x + threadIdx.x; // do in serial if (i == 0) { for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = BVHLeaves + j; printf("BBox for volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } //parents: for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = (BVHLeaves + j)->parent; printf("BBox for parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = (BVHLeaves + j)->parent->parent; printf("BBox for parents parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } for (int j = 0; j < numVolumes; j++) { BVHNode* currentNode = (BVHLeaves + j)->parent->parent->parent; printf("BBox for parents parents parent node of volumeIdx %d: pmin: (%f,%f,%f), pmax: (%f,%f,%f)\n", (BVHLeaves + j)->volIndex, currentNode->boundingBox.pmin.x, currentNode->boundingBox.pmin.y, currentNode->boundingBox.pmin.z, currentNode->boundingBox.pmax.x, currentNode->boundingBox.pmax.y, currentNode->boundingBox.pmax.z); } } } __global__ void ComputeMortonCodes(const GPU_VDB* volumes, int numTriangles, AABB sceneBounds, MortonCode* mortonCodes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numTriangles) { // Compute volume centroid float3 centroid = volumes[i].Bounds().Centroid(); // Normalize triangle centroid to lie within [0,1] of scene bounding box float x = (centroid.x - sceneBounds.pmin.x) / (sceneBounds.pmax.x - sceneBounds.pmin.x); float y = (centroid.y - sceneBounds.pmin.y) / (sceneBounds.pmax.y - sceneBounds.pmin.y); float z = (centroid.z - sceneBounds.pmin.z) / (sceneBounds.pmax.z - sceneBounds.pmin.z); // Compute morton code mortonCodes[i] = ComputeMortonCode(x, y, z); } } __global__ void ConstructBVH(BVHNode* BVHNodes, BVHNode* BVHLeaves, int* nodeCounter, GPU_VDB* volumes, int* volumeIDs, int numVolumes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numVolumes) { 
BVHNode* leaf = BVHLeaves + i; int volumeIdx = volumeIDs[i]; // Handle leaf first leaf->volIndex = volumeIdx; leaf->boundingBox = volumes[volumeIdx].Bounds(); BVHNode* current = leaf->parent; int currentIndex = current - BVHNodes; int res = atomicAdd(nodeCounter + currentIndex, 1); // Go up and handle internal nodes while (true) { if (res == 0) { return; } AABB leftBoundingBox = current->leftChild->boundingBox; AABB rightBoundingBox = current->rightChild->boundingBox; // Compute current bounding box current->boundingBox = UnionB(leftBoundingBox, rightBoundingBox); // If current is root, return if (current == BVHNodes) { return; } current = current->parent; currentIndex = current - BVHNodes; res = atomicAdd(nodeCounter + currentIndex, 1); } } } __global__ void BuildRadixTree(BVHNode* radixTreeNodes, BVHNode* radixTreeLeaves, MortonCode* mortonCodes, int* volumeIds, int numVolumes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < numVolumes - 1) { // Run radix tree construction algorithm // Determine direction of the range (+1 or -1) int d = LongestCommonPrefix(i, i + 1, numVolumes, mortonCodes, volumeIds) - LongestCommonPrefix(i, i - 1, numVolumes, mortonCodes, volumeIds) >= 0 ? 1 : -1; // Compute upper bound for the length of the range int deltaMin = LongestCommonPrefix(i, i - d, numVolumes, mortonCodes, volumeIds); //int lmax = 128; int lmax = 2; while (LongestCommonPrefix(i, i + lmax * d, numVolumes, mortonCodes, volumeIds) > deltaMin) { //lmax = lmax * 4; lmax = lmax * 2; } // Find the other end using binary search int l = 0; int divider = 2; for (int t = lmax / divider; t >= 1; divider *= 2) { if (LongestCommonPrefix(i, i + (l + t) * d, numVolumes, mortonCodes, volumeIds) > deltaMin) { l = l + t; } if (t == 1) break; t = lmax / divider; } int j = i + l * d; // Find the split position using binary search int deltaNode = LongestCommonPrefix(i, j, numVolumes, mortonCodes, volumeIds); int s = 0; divider = 2; for (int t = (l + (divider - 1)) / divider; t >= 1; divider *= 2) { if (LongestCommonPrefix(i, i + (s + t) * d, numVolumes, mortonCodes, volumeIds) > deltaNode) { s = s + t; } if (t == 1) break; t = (l + (divider - 1)) / divider; } int gamma = i + s * d + min(d, 0); //printf("i:%d, d:%d, deltaMin:%d, deltaNode:%d, lmax:%d, l:%d, j:%d, gamma:%d. 
\n", i, d, deltaMin, deltaNode, lmax, l, j, gamma); // Output child pointers BVHNode* current = radixTreeNodes + i; if (min(i, j) == gamma) { current->leftChild = radixTreeLeaves + gamma; (radixTreeLeaves + gamma)->parent = current; } else { current->leftChild = radixTreeNodes + gamma; (radixTreeNodes + gamma)->parent = current; } if (max(i, j) == gamma + 1) { current->rightChild = radixTreeLeaves + gamma + 1; (radixTreeLeaves + gamma + 1)->parent = current; } else { current->rightChild = radixTreeNodes + gamma + 1; (radixTreeNodes + gamma + 1)->parent = current; } current->minId = min(i, j); current->maxId = max(i, j); } } __global__ void pass_octree(GPU_VDB *volumes, int num_volumes, OCTNode *root, int depth, bool m_debug) { build_octree_recursive(volumes, num_volumes, root, depth, m_debug); } extern "C" void BuildBVH(BVH& bvh, GPU_VDB* volumes, int numVolumes, AABB &sceneBounds, bool debug_bvh) { int blockSize = BLOCK_SIZE; int gridSize = (numVolumes + blockSize - 1) / blockSize; // Timing metrics float total = 0; float elapsed; cudaEvent_t start, stop; if (debug_bvh) printf("Number of volumes: %i\n", numVolumes); cudaEventCreate(&start); cudaEventCreate(&stop); // Pre-process stage, scene bounding box // TODO: add check if this has been done already // if we already have scenebounds and have new/modified triangles, no need to start over // Should only do this if scene has changed (added tris, moved tris) // Compute bounding boxes if (debug_bvh) printf("Computing volume bounding boxes..."); cudaEventRecord(start, 0); thrust::device_vector<AABB> boundingBoxes(numVolumes); ComputeBoundingBoxes <<<gridSize, blockSize>>> (volumes, numVolumes, boundingBoxes.data().get()); CudaCheckError(); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; thrust::host_vector<AABB> bounding_boxes_h = boundingBoxes; // Compute scene bounding box if (debug_bvh) printf("Computing scene bounding box..."); cudaEventRecord(start, 0); sceneBounds = thrust::reduce(boundingBoxes.begin(), boundingBoxes.end(), AABB(), AABBUnion()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; if (debug_bvh) printf("Total pre-computation time for scene was %f ms. \n", total); total = 0; if (debug_bvh) { printf("Scene boundingbox:\n"); printf("pmin: %f, %f, %f\n", sceneBounds.pmin.x, sceneBounds.pmin.y, sceneBounds.pmin.z); printf("pmax: %f, %f, %f\n", sceneBounds.pmax.x, sceneBounds.pmax.y, sceneBounds.pmax.z); } // Pre-process done, start building BVH // Compute Morton codes thrust::device_vector<MortonCode> mortonCodes(numVolumes); if (debug_bvh) printf("Computing Morton codes..."); cudaEventRecord(start, 0); ComputeMortonCodes <<<gridSize, blockSize>>> (volumes, numVolumes, sceneBounds, mortonCodes.data().get()); CudaCheckError(); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. 
\n", elapsed); total += elapsed; // Sort triangle indices with Morton code as key thrust::device_vector<int> triangleIDs(numVolumes); thrust::sequence(triangleIDs.begin(), triangleIDs.end()); if (debug_bvh) printf("Sort volumes..."); cudaEventRecord(start, 0); try { thrust::sort_by_key(mortonCodes.begin(), mortonCodes.end(), triangleIDs.begin()); } catch (thrust::system_error e) { printf("Error inside sort: %s\n", e.what()); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Sorting took %f ms. \n", elapsed); total += elapsed; // Build radix tree of BVH nodes checkCudaErrors(cudaMalloc((void**)&bvh.BVHNodes, (numVolumes - 1) * sizeof(BVHNode))); checkCudaErrors(cudaMalloc((void**)&bvh.BVHLeaves, numVolumes * sizeof(BVHNode))); if (debug_bvh) printf("Building radix tree..."); cudaEventRecord(start, 0); BuildRadixTree <<<gridSize, blockSize>>> (bvh.BVHNodes, bvh.BVHLeaves, mortonCodes.data().get(), triangleIDs.data().get(), numVolumes); CudaCheckError(); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; // Build BVH thrust::device_vector<int> nodeCounters(numVolumes); if (debug_bvh) printf("Building BVH..."); cudaEventRecord(start, 0); ConstructBVH <<<gridSize, blockSize >>> (bvh.BVHNodes, bvh.BVHLeaves, nodeCounters.data().get(), volumes, triangleIDs.data().get(), numVolumes); CudaCheckError(); checkCudaErrors(cudaDeviceSynchronize()); //DebugBVH << <gridSize, blockSize >> >(bvh.BVHLeaves, bvh.BVHNodes, numVolumes); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); if (debug_bvh) printf(" done! Computation took %f ms. \n", elapsed); total += elapsed; if (debug_bvh) printf("Total BVH construction time was %f ms. \n", total); cudaEventDestroy(start); cudaEventDestroy(stop); } extern "C" void build_octree(OCTNode *root, GPU_VDB *volumes, int num_volumes, int depth, bool m_debug) { float elapsed; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if (m_debug) printf("Building Octree..."); cudaEventRecord(start, 0); pass_octree << <1, 1 >> > (volumes, num_volumes, root, depth, false); CudaCheckError(); checkCudaErrors(cudaGetLastError()); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); checkCudaErrors(cudaDeviceSynchronize()); if(m_debug) printf("done! Computation took %f ms. \n", elapsed); cudaEventDestroy(start); cudaEventDestroy(stop); }
3ffb66e7cd5e6bd19ab06f0f445d101c2ad76906.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void histogram(int n, int* color, int* bucket) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { int c = color[i]; atomicAdd(&bucket[c], 1); } } void host_histogram() { int N = 1 << 20; int M = 1 << 10; int *color_, *bucket_, *d_color, *d_bucket; color_ = (int*)malloc(N * sizeof(int)); bucket_ = (int*)malloc(M * sizeof(int)); hipMalloc(&d_color, N * sizeof(int)); hipMalloc(&d_bucket, M * sizeof(int)); memset(bucket_, 0, M * sizeof(int)); for (int i = 0; i < N; i++) { color_[i] = rand() % M; bucket_[color_[i]]++; } printf("cpu bucket: %d,%d,%d,%d,%d\n", bucket_[0], bucket_[1], bucket_[2], bucket_[3], bucket_[4]); memset(bucket_, 0, M * sizeof(int)); hipMemcpy(d_color, color_, N * sizeof(int), hipMemcpyHostToDevice); hipMemset(d_bucket, 0, M * sizeof(int)); hipLaunchKernelGGL(( histogram), dim3((N + 255) / 256), dim3(256), 0, 0, N, d_color, d_bucket); hipMemcpy(bucket_, d_bucket, M * sizeof(int), hipMemcpyDeviceToHost); printf("gpu bucket: %d,%d,%d,%d,%d\n", bucket_[0], bucket_[1], bucket_[2], bucket_[3], bucket_[4]); hipFree(d_color); hipFree(d_bucket); free(color_); free(bucket_); } /** * Introduce local maximums and update global only when new local maximum found */ __global__ void global_max(int* values, int* global_max, int* local_max, int num_locals) { int i = threadIdx.x + blockDim.x * blockIdx.x; int val = values[i]; int li = i % num_locals; int old_max = atomicMax(&local_max[li], val); if (old_max < val) { atomicMax(global_max, val); } } void host_global_max() { int N = 1 << 20; int num_locals_ = 1 << 10; int *values_, *d_values, *d_local_max, *d_global_max; values_ = (int*)malloc(N * sizeof(int)); hipMalloc(&d_values, N * sizeof(int)); hipMalloc(&d_local_max, num_locals_ * sizeof(int)); hipMalloc(&d_global_max, sizeof(int)); int h_global_max = -1; for (int i = 0; i < N; i++) { values_[i] = rand(); if (h_global_max < values_[i]) h_global_max = values_[i]; } printf("cpu global_max: %d\n", h_global_max); h_global_max = -1; hipMemcpy(d_values, values_, N * sizeof(int), hipMemcpyHostToDevice); hipMemset(d_local_max, 0, num_locals_ * sizeof(int)); hipMemset(d_global_max, 0, sizeof(int)); hipLaunchKernelGGL(( global_max), dim3((N + 255) / 256), dim3(256), 0, 0, d_values, d_global_max, d_local_max, num_locals_); hipMemcpy(&h_global_max, d_global_max, sizeof(int), hipMemcpyDeviceToHost); printf("gpu global_max: %d\n", h_global_max); hipFree(d_values); hipFree(d_local_max); hipFree(d_global_max); free(values_); } int main(void) { host_histogram(); host_global_max(); }
3ffb66e7cd5e6bd19ab06f0f445d101c2ad76906.cu
#include <stdio.h> __global__ void histogram(int n, int* color, int* bucket) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { int c = color[i]; atomicAdd(&bucket[c], 1); } } void host_histogram() { int N = 1 << 20; int M = 1 << 10; int *color_, *bucket_, *d_color, *d_bucket; color_ = (int*)malloc(N * sizeof(int)); bucket_ = (int*)malloc(M * sizeof(int)); cudaMalloc(&d_color, N * sizeof(int)); cudaMalloc(&d_bucket, M * sizeof(int)); memset(bucket_, 0, M * sizeof(int)); for (int i = 0; i < N; i++) { color_[i] = rand() % M; bucket_[color_[i]]++; } printf("cpu bucket: %d,%d,%d,%d,%d\n", bucket_[0], bucket_[1], bucket_[2], bucket_[3], bucket_[4]); memset(bucket_, 0, M * sizeof(int)); cudaMemcpy(d_color, color_, N * sizeof(int), cudaMemcpyHostToDevice); cudaMemset(d_bucket, 0, M * sizeof(int)); histogram<<<(N + 255) / 256, 256>>>(N, d_color, d_bucket); cudaMemcpy(bucket_, d_bucket, M * sizeof(int), cudaMemcpyDeviceToHost); printf("gpu bucket: %d,%d,%d,%d,%d\n", bucket_[0], bucket_[1], bucket_[2], bucket_[3], bucket_[4]); cudaFree(d_color); cudaFree(d_bucket); free(color_); free(bucket_); } /** * Introduce local maximums and update global only when new local maximum found */ __global__ void global_max(int* values, int* global_max, int* local_max, int num_locals) { int i = threadIdx.x + blockDim.x * blockIdx.x; int val = values[i]; int li = i % num_locals; int old_max = atomicMax(&local_max[li], val); if (old_max < val) { atomicMax(global_max, val); } } void host_global_max() { int N = 1 << 20; int num_locals_ = 1 << 10; int *values_, *d_values, *d_local_max, *d_global_max; values_ = (int*)malloc(N * sizeof(int)); cudaMalloc(&d_values, N * sizeof(int)); cudaMalloc(&d_local_max, num_locals_ * sizeof(int)); cudaMalloc(&d_global_max, sizeof(int)); int h_global_max = -1; for (int i = 0; i < N; i++) { values_[i] = rand(); if (h_global_max < values_[i]) h_global_max = values_[i]; } printf("cpu global_max: %d\n", h_global_max); h_global_max = -1; cudaMemcpy(d_values, values_, N * sizeof(int), cudaMemcpyHostToDevice); cudaMemset(d_local_max, 0, num_locals_ * sizeof(int)); cudaMemset(d_global_max, 0, sizeof(int)); global_max<<<(N + 255) / 256, 256>>>(d_values, d_global_max, d_local_max, num_locals_); cudaMemcpy(&h_global_max, d_global_max, sizeof(int), cudaMemcpyDeviceToHost); printf("gpu global_max: %d\n", h_global_max); cudaFree(d_values); cudaFree(d_local_max); cudaFree(d_global_max); free(values_); } int main(void) { host_histogram(); host_global_max(); }
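The histogram kernel above funnels every element through one global-memory atomicAdd, and global_max shows the contention-reducing idea of filtering through per-slot locals before touching the global result. The same idea is often applied to the histogram itself by privatizing it in shared memory per block and flushing once at the end; the sketch below is a hedged variant, not part of the original file, and histogram_shared and num_bins are illustrative names (it assumes the bin count fits in shared memory):

__global__ void histogram_shared(int n, const int* color, int* bucket, int num_bins)
{
    extern __shared__ int local[];               // num_bins ints, sized at launch
    for (int b = threadIdx.x; b < num_bins; b += blockDim.x)
        local[b] = 0;
    __syncthreads();

    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        atomicAdd(&local[color[i]], 1);          // contention stays on-chip
    __syncthreads();

    for (int b = threadIdx.x; b < num_bins; b += blockDim.x)
        atomicAdd(&bucket[b], local[b]);         // one global atomic per bin per block
}
// A possible launch, reusing the N and M = 1 << 10 bins from host_histogram above:
//   histogram_shared<<<64, 256, M * sizeof(int)>>>(N, d_color, d_bucket, M);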
54197d2dba2245dc41d0a423825f47fc8256ed9e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void childKernel()
{
    printf("Hello");
}
54197d2dba2245dc41d0a423825f47fc8256ed9e.cu
#include "includes.h" __global__ void childKernel() { printf("Hello"); }
e0d2039f93b822744e731136cbf1c0fcdf49e508.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void vecAdd(const int* A, const int* B, int* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}
e0d2039f93b822744e731136cbf1c0fcdf49e508.cu
extern "C" __global__ void vecAdd(const int* A, const int* B, int* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; }
79bd5937cf587e69bd862766dea97b9fd5dafba8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "furthest_point_sample_cuda.cuh" namespace vision3d { void furthest_point_sample_cuda_launcher( const at::Tensor& points, at::Tensor& distances, at::Tensor& indices, const int num_samples) { CHECK_CUDA_AND_CONTIGUOUS(points); CHECK_CUDA_AND_CONTIGUOUS(distances); CHECK_CUDA_AND_CONTIGUOUS(indices); CHECK_SCALAR_TYPE_FLOAT(points); CHECK_SCALAR_TYPE_FLOAT(distances); CHECK_SCALAR_TYPE_LONG(indices); at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int batch_size = points.size(0); int num_points = points.size(1); const float* points_ptr = points.data_ptr<float>(); float* distances_ptr = distances.data_ptr<float>(); long* indices_ptr = indices.data_ptr<long>(); size_t block_dim = opt_n_threads(num_points); switch (block_dim) { case 512: hipLaunchKernelGGL(( furthest_point_sample_kernel<512>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 256: hipLaunchKernelGGL(( furthest_point_sample_kernel<256>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 128: hipLaunchKernelGGL(( furthest_point_sample_kernel<128>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 64: hipLaunchKernelGGL(( furthest_point_sample_kernel<64>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 32: hipLaunchKernelGGL(( furthest_point_sample_kernel<32>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 16: hipLaunchKernelGGL(( furthest_point_sample_kernel<16>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 8: hipLaunchKernelGGL(( furthest_point_sample_kernel<8>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 4: hipLaunchKernelGGL(( furthest_point_sample_kernel<4>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 2: hipLaunchKernelGGL(( furthest_point_sample_kernel<2>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 1: hipLaunchKernelGGL(( furthest_point_sample_kernel<1>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; default: hipLaunchKernelGGL(( furthest_point_sample_kernel<512>), dim3(batch_size), dim3(block_dim), 0, stream, batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); } AT_CUDA_CHECK(hipGetLastError()); } } // namespace vision3d
79bd5937cf587e69bd862766dea97b9fd5dafba8.cu
#include "furthest_point_sample_cuda.cuh" namespace vision3d { void furthest_point_sample_cuda_launcher( const at::Tensor& points, at::Tensor& distances, at::Tensor& indices, const int num_samples) { CHECK_CUDA_AND_CONTIGUOUS(points); CHECK_CUDA_AND_CONTIGUOUS(distances); CHECK_CUDA_AND_CONTIGUOUS(indices); CHECK_SCALAR_TYPE_FLOAT(points); CHECK_SCALAR_TYPE_FLOAT(distances); CHECK_SCALAR_TYPE_LONG(indices); at::cuda::CUDAGuard device_guard(points.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); int batch_size = points.size(0); int num_points = points.size(1); const float* points_ptr = points.data_ptr<float>(); float* distances_ptr = distances.data_ptr<float>(); long* indices_ptr = indices.data_ptr<long>(); size_t block_dim = opt_n_threads(num_points); switch (block_dim) { case 512: furthest_point_sample_kernel<512><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 256: furthest_point_sample_kernel<256><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 128: furthest_point_sample_kernel<128><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 64: furthest_point_sample_kernel<64><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 32: furthest_point_sample_kernel<32><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 16: furthest_point_sample_kernel<16><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 8: furthest_point_sample_kernel<8><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 4: furthest_point_sample_kernel<4><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 2: furthest_point_sample_kernel<2><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; case 1: furthest_point_sample_kernel<1><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); break; default: furthest_point_sample_kernel<512><<<batch_size, block_dim, 0, stream>>>( batch_size, num_points, num_samples, points_ptr, distances_ptr, indices_ptr); } AT_CUDA_CHECK(cudaGetLastError()); } } // namespace vision3d
08a1897cad285bb6e56e94c76f20d7d260a20fc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> hipError_t addWithCuda(unsigned long long int liczba, bool &pierwsza); bool CPU_sprawdz(unsigned long long int liczba); #define NUM_THREADS 512 #define NUM_BLOCKS 1024 __global__ void addKernel(unsigned long long int *liczba, bool *pierwsza) { if (*pierwsza == true) { unsigned long long int idx = 2 * (blockIdx.x*blockDim.x + threadIdx.x) + 3; while (idx*idx <= *liczba) { if (*liczba % (idx) == 0) *pierwsza = false; if (*pierwsza == false) return; idx += blockDim.x*gridDim.x; if (idx*idx > *liczba) break; } } } int main() { unsigned long long int liczba = 2 ^ 64 - 1; bool pierwsza; clock_t t1, t2; printf("Podaj liczbe:"); scanf("%llu", &liczba); while (liczba > 18446744073709551615) { printf("Podaj liczbe:"); scanf("%llu", &liczba); } t1 = clock(); //pierwsza = CPU_sprawdz(liczba); t2 = clock(); //printf("CPU wynik: %d w %lf \n", pierwsza, (double)(t2-t1)/CLOCKS_PER_SEC); // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(liczba, pierwsza); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } ///printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", // c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } bool CPU_sprawdz(unsigned long long int liczba) { for (unsigned long long int i = 2; i < liczba; i++) { if (liczba % i == 0) { printf("%d , %d \n", liczba, i); return false; } } return true; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(unsigned long long int liczba, bool &pierwsza) { pierwsza = true; unsigned long long int *dev_liczba = NULL; unsigned long long int *dev_zakres = NULL; bool *dev_pierwsza = NULL; hipError_t cudaStatus; clock_t t1, t2; float time_GPU = 0; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } if (liczba % 2 == 0) pierwsza = false; else { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_liczba, sizeof(unsigned long long int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!1"); goto Error; } cudaStatus = hipMalloc((void**)&dev_pierwsza, sizeof(bool)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!2"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_liczba, &liczba, sizeof(unsigned long long int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed3!"); goto Error; } cudaStatus = hipMemcpy(dev_pierwsza, &pierwsza, sizeof(bool), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!4"); goto Error; } // Launch a kernel on the GPU with one thread for each element. 
addKernel << < NUM_BLOCKS, NUM_THREADS >> > (dev_liczba, dev_pierwsza); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(&pierwsza, dev_pierwsza, sizeof(bool), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed5!"); goto Error; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time_GPU, start, stop); hipEventDestroy(start); hipEventDestroy(stop); } printf("GPU wynik: %d w czasie: %LF ms \n", pierwsza, time_GPU); Error: hipFree(dev_liczba); hipFree(dev_pierwsza); return cudaStatus; }
08a1897cad285bb6e56e94c76f20d7d260a20fc6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> cudaError_t addWithCuda(unsigned long long int liczba, bool &pierwsza); bool CPU_sprawdz(unsigned long long int liczba); #define NUM_THREADS 512 #define NUM_BLOCKS 1024 __global__ void addKernel(unsigned long long int *liczba, bool *pierwsza) { if (*pierwsza == true) { unsigned long long int idx = 2 * (blockIdx.x*blockDim.x + threadIdx.x) + 3; while (idx*idx <= *liczba) { if (*liczba % (idx) == 0) *pierwsza = false; if (*pierwsza == false) return; idx += blockDim.x*gridDim.x; if (idx*idx > *liczba) break; } } } int main() { unsigned long long int liczba = 2 ^ 64 - 1; bool pierwsza; clock_t t1, t2; printf("Podaj liczbe:"); scanf("%llu", &liczba); while (liczba > 18446744073709551615) { printf("Podaj liczbe:"); scanf("%llu", &liczba); } t1 = clock(); //pierwsza = CPU_sprawdz(liczba); t2 = clock(); //printf("CPU wynik: %d w %lf \n", pierwsza, (double)(t2-t1)/CLOCKS_PER_SEC); // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(liczba, pierwsza); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } ///printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", // c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } bool CPU_sprawdz(unsigned long long int liczba) { for (unsigned long long int i = 2; i < liczba; i++) { if (liczba % i == 0) { printf("%d , %d \n", liczba, i); return false; } } return true; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(unsigned long long int liczba, bool &pierwsza) { pierwsza = true; unsigned long long int *dev_liczba = NULL; unsigned long long int *dev_zakres = NULL; bool *dev_pierwsza = NULL; cudaError_t cudaStatus; clock_t t1, t2; float time_GPU = 0; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } if (liczba % 2 == 0) pierwsza = false; else { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_liczba, sizeof(unsigned long long int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!1"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_pierwsza, sizeof(bool)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!2"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_liczba, &liczba, sizeof(unsigned long long int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed3!"); goto Error; } cudaStatus = cudaMemcpy(dev_pierwsza, &pierwsza, sizeof(bool), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!4"); goto Error; } // Launch a kernel on the GPU with one thread for each element. 
addKernel << < NUM_BLOCKS, NUM_THREADS >> > (dev_liczba, dev_pierwsza); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(&pierwsza, dev_pierwsza, sizeof(bool), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed5!"); goto Error; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time_GPU, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); } printf("GPU wynik: %d w czasie: %LF ms \n", pierwsza, time_GPU); Error: cudaFree(dev_liczba); cudaFree(dev_pierwsza); return cudaStatus; }
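CPU_sprawdz above tests every candidate divisor below liczba, whereas the kernel only walks odd divisors up to the square root of the number (even inputs are rejected on the host before launch): thread t starts at 2*t + 3 and steps by the even total thread count, so its candidate stays odd. A plain CPU reference of that odd-divisor scheme, added only as a sketch to make the mapping easier to follow (is_prime_odd_trial is an illustrative name, not from the file):

#include <cstdio>

// Reference: trial division over odd divisors 3, 5, 7, ... while d*d <= n,
// written as d <= n / d to avoid 64-bit overflow near the upper range.
static bool is_prime_odd_trial(unsigned long long n) {
    if (n < 2) return false;
    if (n % 2 == 0) return n == 2;     // the host code above handles even inputs
    for (unsigned long long d = 3; d <= n / d; d += 2)
        if (n % d == 0) return false;
    return true;
}

int main() {
    printf("%d\n", is_prime_odd_trial(1000000007ULL));  // prime, prints 1
    printf("%d\n", is_prime_odd_trial(999999999ULL));   // divisible by 3, prints 0
    return 0;
}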
f39cfa5096fced897f08fb19063f400f7f87f034.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/NamedTensorUtils.h> #include <ATen/hip/HIPBlas.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/LinearAlgebra.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <c10/util/MaybeOwned.h> namespace at { namespace native { namespace { c10::MaybeOwned<Tensor> inline prepare_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor) { if (tensor.is_non_overlapping_and_dense()) { // common case transpose_tensor = tensor.is_contiguous(); return c10::MaybeOwned<Tensor>::borrowed(tensor); } IntArrayRef tensor_strides = tensor.strides(); IntArrayRef tensor_sizes = tensor.sizes(); if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) { transpose_tensor = false; return c10::MaybeOwned<Tensor>::borrowed(tensor); } else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) { transpose_tensor = true; return c10::MaybeOwned<Tensor>::borrowed(tensor); } else { transpose_tensor = true; return c10::MaybeOwned<Tensor>::owned(tensor.clone(at::MemoryFormat::Contiguous)); } } } // namespace Tensor prepare_batch_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor, int64_t& ld_tensor, bool transpose_result, int64_t m, int64_t n) { IntArrayRef tensor_strides = tensor.strides(); Tensor tensor_; int fast_dim = transpose_result ? 2 : 1; int leading_dim = transpose_result ? 1 : 2; if (tensor_strides[fast_dim] == 1 && (tensor_strides[leading_dim] >= std::max<int64_t>(1, m))) { transpose_tensor = false; tensor_ = tensor; ld_tensor = tensor_strides[leading_dim]; } else if ((tensor_strides[leading_dim] == 1) && (tensor_strides[fast_dim] >= std::max<int64_t>(1, n))) { transpose_tensor = true; tensor_ = tensor; ld_tensor = tensor_strides[fast_dim]; } else { transpose_tensor = !transpose_result; // gemm call requires leading dimension and stride parameters to be non-zero bool is_stride_non_zero = tensor.stride(1) != 0 && tensor.stride(2) != 0; if (tensor.is_contiguous() && is_stride_non_zero) { tensor_ = tensor; } else { tensor_ = tensor.clone(at::MemoryFormat::Contiguous); } ld_tensor = tensor_.stride(1); } return tensor_; } namespace { Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) { TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D"); TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}}; checkAllSameGPU("addmm", args); IntArrayRef mat1_sizes = mat1.sizes(); IntArrayRef mat2_sizes = mat2.sizes(); IntArrayRef self__sizes; c10::MaybeOwned<Tensor> self_; if (&result != &self) { self_ = expand_size(self, {mat1_sizes[0], mat2_sizes[1]}, "addmm"); self__sizes = self_->sizes(); } else { self_ = c10::MaybeOwned<Tensor>::borrowed(self); self__sizes = self_->sizes(); TORCH_CHECK(result.dim() == 2, "tensors must be 2-D"); TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0"); TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1"); } TORCH_CHECK( mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0", " mat1 dim1:", mat1_sizes[1], " mat2 dim0: ", mat2_sizes[0]); if 
(&result != &self) { at::native::resize_output(result, self__sizes); if (beta.toComplexDouble() != 0.0) { at::native::copy_(result, *self_); } } IntArrayRef result_sizes = result.sizes(); if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) { return result; } bool transpose_result; c10::MaybeOwned<Tensor> result_ = prepare_matrix_for_cublas(result, transpose_result); bool transpose_mat1; bool transpose_mat2; c10::MaybeOwned<Tensor> mat1_ = prepare_matrix_for_cublas(transpose_result ? mat2 : mat1, transpose_mat1); c10::MaybeOwned<Tensor> mat2_ = prepare_matrix_for_cublas(transpose_result ? mat1 : mat2, transpose_mat2); if (transpose_result) { transpose_mat1 = !transpose_mat1; transpose_mat2 = !transpose_mat2; mat1_sizes = mat1_->sizes(); mat2_sizes = mat2_->sizes(); } int64_t m = mat1_sizes[transpose_result ? 1 : 0]; int64_t k = mat1_sizes[transpose_result ? 0 : 1]; int64_t n = mat2_sizes[transpose_result ? 0 : 1]; int64_t mat1_ld = mat1_->stride((transpose_mat1 == transpose_result) ? 1 : 0); int64_t mat2_ld = mat2_->stride((transpose_mat2 == transpose_result) ? 1 : 0); int64_t result_ld = result_->stride(transpose_result ? 0 : 1); at::ScalarType scalar_type = self_->scalar_type(); if (mat1.numel() == 0) { // By definition, when beta==0, values in self should be ignored. nans and infs // should not propagate if (beta.toComplexDouble() == 0.) { return result.zero_(); } // TODO: We could squeeze some perf by calling at::cuda::mul_out here instead, to bypass the dispatcher. // That requires some fixing some internal build dependencies though. return at::mul_out( result, self, at::native::scalar_tensor( beta, self.scalar_type(), c10::nullopt /* layout */, at::kCPU, c10::nullopt /* pin_memory */)); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] { scalar_t alpha_val = alpha.to<scalar_t>(); scalar_t beta_val = beta.to<scalar_t>(); scalar_t* mat1_ptr = mat1_->data_ptr<scalar_t>(); scalar_t* mat2_ptr = mat2_->data_ptr<scalar_t>(); scalar_t* result_ptr = result_->data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( transpose_mat1 ? 't' : 'n', transpose_mat2 ? 
't' : 'n', m, n, k, alpha_val, mat1_ptr, mat1_ld, mat2_ptr, mat2_ld, beta_val, result_ptr, result_ld ); }); if (!result.is_same(*result_)) { result.copy_(*result_); } return result; } Tensor& baddbmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha) { TORCH_CHECK(self.dim() == 3, "self must be a 3D tensor"); TORCH_CHECK(batch1.dim() == 3, "batch1 must be a 3D tensor"); TORCH_CHECK(batch2.dim() == 3, "batch2 must be a 3D tensor"); TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {batch1, "batch1", 2}, {batch2, "batch2", 3}}; checkAllSameGPU("baddbmm", args); IntArrayRef batch1_sizes = batch1.sizes(); IntArrayRef batch2_sizes = batch2.sizes(); IntArrayRef self_sizes = self.sizes(); TORCH_CHECK(self_sizes[0] == batch1_sizes[0], "self dim 0 must match batch1 dim 0"); TORCH_CHECK(self_sizes[0] == batch2_sizes[0], "self dim 0 must match batch2 dim 0"); TORCH_CHECK(self_sizes[1] == batch1_sizes[1], "self dim 1 must match batch1 dim 1"); TORCH_CHECK(self_sizes[2] == batch2_sizes[2], "self dim 2 must match batch2 dim 2"); TORCH_CHECK(batch1_sizes[2] == batch2_sizes[1], "batch1 dim 2 must match batch2 dim 1"); if (!result.is_same(self)) { result.resize_as_(self); if (beta.to<c10::complex<double>>() != 0.0) { result.copy_(self); } } // handle pathological cases that blas may not like if (result.numel() == 0) { return result; } else if (batch1_sizes[2] == 0) { if (beta.to<c10::complex<double>>() == 0.0) { return result.zero_(); } else { return result.mul_(beta); } } bool transpose_result = false; Tensor result_; IntArrayRef result_strides = result.strides(); IntArrayRef result_sizes = result.sizes(); if ((result_strides[1] == 1) && ((result_sizes[2] == 1) || (result_strides[2] >= std::max<int64_t>(1, result_sizes[1])))) { result_ = result; } else if ((result_strides[2] == 1) && (result_sizes[1] == 1 || (result_strides[1] >= std::max<int64_t>(1, result_sizes[2])))) { transpose_result = true; result_ = result; } else { result_ = result.transpose(1, 2).clone(at::MemoryFormat::Contiguous); result_ = result_.transpose(1, 2); } int leading_dim = transpose_result ? 1 : 2; Tensor batch1_ = transpose_result ? batch2 : batch1; Tensor batch2_ = transpose_result ? batch1 : batch2; int64_t m = result_sizes[transpose_result ? 2 : 1]; int64_t n = result_sizes[leading_dim]; int64_t k = batch1_.size(leading_dim); int64_t lda, ldb, ldc; bool transpose_batch1, transpose_batch2; batch1_ = prepare_batch_matrix_for_cublas(batch1_, transpose_batch1, lda, transpose_result, m, k); batch2_ = prepare_batch_matrix_for_cublas(batch2_, transpose_batch2, ldb, transpose_result, k, n); ldc = result_.stride(leading_dim); int64_t num_batches = result_.size(0); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "baddbmm_cuda", [&] { scalar_t alpha_val = alpha.to<scalar_t>(); scalar_t beta_val = beta.to<scalar_t>(); scalar_t* batch1_ptr = batch1_.data_ptr<scalar_t>(); scalar_t* batch2_ptr = batch2_.data_ptr<scalar_t>(); scalar_t* result_ptr = result_.data_ptr<scalar_t>(); at::cuda::blas::bgemm<scalar_t>( transpose_batch1 ? 't' : 'n', transpose_batch2 ? 
't' : 'n', m, n, k, alpha_val, batch1_ptr, lda, batch1_.stride(0), batch2_ptr, ldb, batch2_.stride(0), beta_val, result_ptr, ldc, result_.stride(0), num_batches ); }); if (!result.is_same(result_)) { result.copy_(result_); } return result; } } // anonymous namespace Tensor& mm_out_cuda(const Tensor& self, const Tensor& mat2, Tensor& result) { result.resize_({ self.size(0), mat2.size(1) }); return addmm_out_cuda_impl(result, result, self, mat2, 0, 1); } Tensor mm_cuda(const Tensor& self, const Tensor& mat2) { Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options()); return addmm_out_cuda_impl(result, result, self, mat2, 0, 1); } Tensor& addmm_out_cuda(const Tensor &self, const Tensor &mat1, const Tensor &mat2, const Scalar& beta, const Scalar& alpha, Tensor &out) { { at::NoNamesGuard guard; Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha); } at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self); return out; } Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) { Tensor out = at::empty({0}, self.options()); addmm_out_cuda(self, mat1, mat2, beta, alpha, out); return out; } Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) { addmm_out_cuda(self, mat1, mat2, beta, alpha, self); return self; } Tensor& baddbmm_out_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha, Tensor &result) { auto self_ = &result == &self ? c10::MaybeOwned<Tensor>::borrowed(self) : expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm"); { at::NoNamesGuard guard; baddbmm_out_cuda_impl(result, *self_, batch1, batch2, beta, alpha); } namedinference::propagate_names_if_nonempty( result, namedinference::compute_baddbmm_outnames(result, batch1, batch2, self)); return result; } Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha) { Tensor out = at::empty({0}, self.options()); return baddbmm_out_cuda(self, batch1, batch2, beta, alpha, out); } Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha) { return baddbmm_out_cuda(self, batch1, batch2, beta, alpha, self); } Tensor& bmm_out_cuda(const Tensor& batch1, const Tensor& batch2, Tensor &result) { result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) }); Scalar beta(0.0); Scalar alpha(1.0); { NoNamesGuard guard; baddbmm_out_cuda_impl(result, result, batch1, batch2, beta, alpha); } namedinference::propagate_names_if_nonempty( result, namedinference::compute_bmm_outnames(result, batch1, batch2)); return result; } Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) { Tensor result = at::empty({0}, self.options()); return native::bmm_out_cuda(self, mat2, result); } namespace { inline void dot_check(const Tensor& self, const Tensor& other) { TORCH_CHECK( self.dim() == 1 && other.dim() == 1, "1D tensors expected, but got ", self.dim(), "D and ", other.dim(), "D tensors"); TORCH_CHECK( self.scalar_type() == other.scalar_type(), "dot : expected both vectors to have same dtype, but found ", self.scalar_type(), " and ", other.scalar_type()); TORCH_CHECK( self.numel() == other.numel(), "inconsistent tensor size, expected tensor [", self.numel(), "] and src [", other.numel(), "] to have the same number of elements, but got ", self.numel(), " and ", other.numel(), " elements 
respectively"); TORCH_CHECK( self.device() == other.device(), "expected all tensors to be on the same device. Found: ", self.device(), ", ", other.device()); TORCH_CHECK( (self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) && (other.stride(0) <= INT_MAX), "dot only supports n, incx, incy with the bound [val] <= %d", INT_MAX); } } // anonymous namespace Tensor dot_cuda(const Tensor& self, const Tensor& other) { at::NoNamesGuard guard; dot_check(self, other); const int n = static_cast<int>(self.numel()); int incx = static_cast<int>(self.stride(0)); int incy = static_cast<int>(other.stride(0)); if (n == 1) { incx = 1; incy = 1; } return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] { Tensor result = at::empty({}, self.options()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::PointerModeGuard pointerModeGuard(handle, HIPBLAS_POINTER_MODE_DEVICE); at::cuda::blas::dot<scalar_t>( handle, n, self.data_ptr<scalar_t>(), incx, other.data_ptr<scalar_t>(), incy, result.data_ptr<scalar_t>()); return result; }); } Tensor vdot_cuda(const Tensor& self, const Tensor& other) { if (!self.is_complex()) { return dot_cuda(self, other); } at::NoNamesGuard guard; dot_check(self, other); const int n = static_cast<int>(self.numel()); int incx = static_cast<int>(self.stride(0)); int incy = static_cast<int>(other.stride(0)); if (n == 1) { incx = 1; incy = 1; } return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] { Tensor result = at::empty({}, self.options()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::PointerModeGuard pointerModeGuard( handle, HIPBLAS_POINTER_MODE_DEVICE); at::cuda::blas::vdot<scalar_t>( handle, n, self.data_ptr<scalar_t>(), incx, other.data_ptr<scalar_t>(), incy, result.data_ptr<scalar_t>()); return result; }); } namespace { void addr_kernel_cuda(TensorIterator &iter, const Scalar& beta, const Scalar& alpha) { if (iter.dtype() == ScalarType::Bool) { using scalar_t = bool; auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); // when beta is false, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == false) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val && vec1_val && vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); } ); } return; } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, iter.dtype(), "addr_cuda", [&] { auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); scalar_t zero_val(0); // when beta==0, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == zero_val) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val * vec1_val * vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return beta_val * self_val + alpha_val * vec1_val * vec2_val; } ); } }); } // This reduction accumulates results as the type `acc_t`. By default, when // `scalar_t` is complex, `acc_t` is the downgraded real number type. // Otherwise, `acc_t` and `scalar_t` are the same type. 
template <typename scalar_t, typename acc_t=typename scalar_value_type<scalar_t>::type, typename out_t=typename scalar_value_type<scalar_t>::type> void linalg_vector_norm_kernel_cuda_impl(TensorIterator& iter, Scalar ord) { double ord_val; if (ord.isFloatingPoint()) { ord_val = ord.to<double>(); } else { TORCH_CHECK(false, "linalg.vector_norm expects ord to be float"); } if (iter.numel() == 0) { iter.output().fill_((ord_val < 0) ? INFINITY : 0); return; } if (ord_val == 0) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<scalar_t, acc_t>(), 0); } else if (ord_val == 1) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<scalar_t, acc_t>(), 0); } else if (ord_val == 2) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<scalar_t, acc_t>(), 0); } else if (ord_val == INFINITY) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<scalar_t, acc_t>(), 0); } else if (ord_val == -INFINITY) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<scalar_t, acc_t>(), std::numeric_limits<acc_t>::infinity()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<scalar_t, acc_t>{ static_cast<acc_t>(ord_val) }, 0); } // For complex outputs, the above kernels do not touch the imaginary values, // so we must zero them out if (isComplexType(iter.output().scalar_type())) { at::imag(iter.output()).zero_(); } } static void linalg_vector_norm_kernel_cuda(TensorIterator& iter, Scalar ord) { if (iter.output().scalar_type() == kHalf) { return linalg_vector_norm_kernel_cuda_impl<at::Half, float>(iter, ord); } else if (iter.input_dtype() == kHalf && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return linalg_vector_norm_kernel_cuda_impl<at::Half, float, float>(iter, ord); } else if(iter.output().scalar_type() == kBFloat16) { return linalg_vector_norm_kernel_cuda_impl<at::BFloat16, float>(iter, ord); } else if (iter.input_dtype() == kBFloat16 && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return linalg_vector_norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, ord); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "linalg_vector_norm_cuda", [&] { linalg_vector_norm_kernel_cuda_impl<scalar_t>(iter, ord); }); } } // anonymous namespace REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda); REGISTER_DISPATCH(linalg_vector_norm_stub, &linalg_vector_norm_kernel_cuda); }}
f39cfa5096fced897f08fb19063f400f7f87f034.cu
#include <ATen/ATen.h> #include <ATen/LegacyTHFunctionsCUDA.h> #include <ATen/NamedTensorUtils.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/LinearAlgebra.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <c10/util/MaybeOwned.h> namespace at { namespace native { namespace { c10::MaybeOwned<Tensor> inline prepare_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor) { if (tensor.is_non_overlapping_and_dense()) { // common case transpose_tensor = tensor.is_contiguous(); return c10::MaybeOwned<Tensor>::borrowed(tensor); } IntArrayRef tensor_strides = tensor.strides(); IntArrayRef tensor_sizes = tensor.sizes(); if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) { transpose_tensor = false; return c10::MaybeOwned<Tensor>::borrowed(tensor); } else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) { transpose_tensor = true; return c10::MaybeOwned<Tensor>::borrowed(tensor); } else { transpose_tensor = true; return c10::MaybeOwned<Tensor>::owned(tensor.clone(at::MemoryFormat::Contiguous)); } } } // namespace Tensor prepare_batch_matrix_for_cublas(const Tensor& tensor, bool& transpose_tensor, int64_t& ld_tensor, bool transpose_result, int64_t m, int64_t n) { IntArrayRef tensor_strides = tensor.strides(); Tensor tensor_; int fast_dim = transpose_result ? 2 : 1; int leading_dim = transpose_result ? 1 : 2; if (tensor_strides[fast_dim] == 1 && (tensor_strides[leading_dim] >= std::max<int64_t>(1, m))) { transpose_tensor = false; tensor_ = tensor; ld_tensor = tensor_strides[leading_dim]; } else if ((tensor_strides[leading_dim] == 1) && (tensor_strides[fast_dim] >= std::max<int64_t>(1, n))) { transpose_tensor = true; tensor_ = tensor; ld_tensor = tensor_strides[fast_dim]; } else { transpose_tensor = !transpose_result; // gemm call requires leading dimension and stride parameters to be non-zero bool is_stride_non_zero = tensor.stride(1) != 0 && tensor.stride(2) != 0; if (tensor.is_contiguous() && is_stride_non_zero) { tensor_ = tensor; } else { tensor_ = tensor.clone(at::MemoryFormat::Contiguous); } ld_tensor = tensor_.stride(1); } return tensor_; } namespace { Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) { TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D"); TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}}; checkAllSameGPU("addmm", args); IntArrayRef mat1_sizes = mat1.sizes(); IntArrayRef mat2_sizes = mat2.sizes(); IntArrayRef self__sizes; c10::MaybeOwned<Tensor> self_; if (&result != &self) { self_ = expand_size(self, {mat1_sizes[0], mat2_sizes[1]}, "addmm"); self__sizes = self_->sizes(); } else { self_ = c10::MaybeOwned<Tensor>::borrowed(self); self__sizes = self_->sizes(); TORCH_CHECK(result.dim() == 2, "tensors must be 2-D"); TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0"); TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1"); } TORCH_CHECK( mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0", " mat1 dim1:", mat1_sizes[1], " mat2 dim0: ", mat2_sizes[0]); if (&result != &self) { at::native::resize_output(result, 
self__sizes); if (beta.toComplexDouble() != 0.0) { at::native::copy_(result, *self_); } } IntArrayRef result_sizes = result.sizes(); if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) { return result; } bool transpose_result; c10::MaybeOwned<Tensor> result_ = prepare_matrix_for_cublas(result, transpose_result); bool transpose_mat1; bool transpose_mat2; c10::MaybeOwned<Tensor> mat1_ = prepare_matrix_for_cublas(transpose_result ? mat2 : mat1, transpose_mat1); c10::MaybeOwned<Tensor> mat2_ = prepare_matrix_for_cublas(transpose_result ? mat1 : mat2, transpose_mat2); if (transpose_result) { transpose_mat1 = !transpose_mat1; transpose_mat2 = !transpose_mat2; mat1_sizes = mat1_->sizes(); mat2_sizes = mat2_->sizes(); } int64_t m = mat1_sizes[transpose_result ? 1 : 0]; int64_t k = mat1_sizes[transpose_result ? 0 : 1]; int64_t n = mat2_sizes[transpose_result ? 0 : 1]; int64_t mat1_ld = mat1_->stride((transpose_mat1 == transpose_result) ? 1 : 0); int64_t mat2_ld = mat2_->stride((transpose_mat2 == transpose_result) ? 1 : 0); int64_t result_ld = result_->stride(transpose_result ? 0 : 1); at::ScalarType scalar_type = self_->scalar_type(); if (mat1.numel() == 0) { // By definition, when beta==0, values in self should be ignored. nans and infs // should not propagate if (beta.toComplexDouble() == 0.) { return result.zero_(); } // TODO: We could squeeze some perf by calling at::cuda::mul_out here instead, to bypass the dispatcher. // That requires some fixing some internal build dependencies though. return at::mul_out( result, self, at::native::scalar_tensor( beta, self.scalar_type(), c10::nullopt /* layout */, at::kCPU, c10::nullopt /* pin_memory */)); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] { scalar_t alpha_val = alpha.to<scalar_t>(); scalar_t beta_val = beta.to<scalar_t>(); scalar_t* mat1_ptr = mat1_->data_ptr<scalar_t>(); scalar_t* mat2_ptr = mat2_->data_ptr<scalar_t>(); scalar_t* result_ptr = result_->data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( transpose_mat1 ? 't' : 'n', transpose_mat2 ? 
't' : 'n', m, n, k, alpha_val, mat1_ptr, mat1_ld, mat2_ptr, mat2_ld, beta_val, result_ptr, result_ld ); }); if (!result.is_same(*result_)) { result.copy_(*result_); } return result; } Tensor& baddbmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha) { TORCH_CHECK(self.dim() == 3, "self must be a 3D tensor"); TORCH_CHECK(batch1.dim() == 3, "batch1 must be a 3D tensor"); TORCH_CHECK(batch2.dim() == 3, "batch2 must be a 3D tensor"); TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {batch1, "batch1", 2}, {batch2, "batch2", 3}}; checkAllSameGPU("baddbmm", args); IntArrayRef batch1_sizes = batch1.sizes(); IntArrayRef batch2_sizes = batch2.sizes(); IntArrayRef self_sizes = self.sizes(); TORCH_CHECK(self_sizes[0] == batch1_sizes[0], "self dim 0 must match batch1 dim 0"); TORCH_CHECK(self_sizes[0] == batch2_sizes[0], "self dim 0 must match batch2 dim 0"); TORCH_CHECK(self_sizes[1] == batch1_sizes[1], "self dim 1 must match batch1 dim 1"); TORCH_CHECK(self_sizes[2] == batch2_sizes[2], "self dim 2 must match batch2 dim 2"); TORCH_CHECK(batch1_sizes[2] == batch2_sizes[1], "batch1 dim 2 must match batch2 dim 1"); if (!result.is_same(self)) { result.resize_as_(self); if (beta.to<c10::complex<double>>() != 0.0) { result.copy_(self); } } // handle pathological cases that blas may not like if (result.numel() == 0) { return result; } else if (batch1_sizes[2] == 0) { if (beta.to<c10::complex<double>>() == 0.0) { return result.zero_(); } else { return result.mul_(beta); } } bool transpose_result = false; Tensor result_; IntArrayRef result_strides = result.strides(); IntArrayRef result_sizes = result.sizes(); if ((result_strides[1] == 1) && ((result_sizes[2] == 1) || (result_strides[2] >= std::max<int64_t>(1, result_sizes[1])))) { result_ = result; } else if ((result_strides[2] == 1) && (result_sizes[1] == 1 || (result_strides[1] >= std::max<int64_t>(1, result_sizes[2])))) { transpose_result = true; result_ = result; } else { result_ = result.transpose(1, 2).clone(at::MemoryFormat::Contiguous); result_ = result_.transpose(1, 2); } int leading_dim = transpose_result ? 1 : 2; Tensor batch1_ = transpose_result ? batch2 : batch1; Tensor batch2_ = transpose_result ? batch1 : batch2; int64_t m = result_sizes[transpose_result ? 2 : 1]; int64_t n = result_sizes[leading_dim]; int64_t k = batch1_.size(leading_dim); int64_t lda, ldb, ldc; bool transpose_batch1, transpose_batch2; batch1_ = prepare_batch_matrix_for_cublas(batch1_, transpose_batch1, lda, transpose_result, m, k); batch2_ = prepare_batch_matrix_for_cublas(batch2_, transpose_batch2, ldb, transpose_result, k, n); ldc = result_.stride(leading_dim); int64_t num_batches = result_.size(0); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "baddbmm_cuda", [&] { scalar_t alpha_val = alpha.to<scalar_t>(); scalar_t beta_val = beta.to<scalar_t>(); scalar_t* batch1_ptr = batch1_.data_ptr<scalar_t>(); scalar_t* batch2_ptr = batch2_.data_ptr<scalar_t>(); scalar_t* result_ptr = result_.data_ptr<scalar_t>(); at::cuda::blas::bgemm<scalar_t>( transpose_batch1 ? 't' : 'n', transpose_batch2 ? 
't' : 'n', m, n, k, alpha_val, batch1_ptr, lda, batch1_.stride(0), batch2_ptr, ldb, batch2_.stride(0), beta_val, result_ptr, ldc, result_.stride(0), num_batches ); }); if (!result.is_same(result_)) { result.copy_(result_); } return result; } } // anonymous namespace Tensor& mm_out_cuda(const Tensor& self, const Tensor& mat2, Tensor& result) { result.resize_({ self.size(0), mat2.size(1) }); return addmm_out_cuda_impl(result, result, self, mat2, 0, 1); } Tensor mm_cuda(const Tensor& self, const Tensor& mat2) { Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options()); return addmm_out_cuda_impl(result, result, self, mat2, 0, 1); } Tensor& addmm_out_cuda(const Tensor &self, const Tensor &mat1, const Tensor &mat2, const Scalar& beta, const Scalar& alpha, Tensor &out) { { at::NoNamesGuard guard; Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha); } at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self); return out; } Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) { Tensor out = at::empty({0}, self.options()); addmm_out_cuda(self, mat1, mat2, beta, alpha, out); return out; } Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha) { addmm_out_cuda(self, mat1, mat2, beta, alpha, self); return self; } Tensor& baddbmm_out_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha, Tensor &result) { auto self_ = &result == &self ? c10::MaybeOwned<Tensor>::borrowed(self) : expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm"); { at::NoNamesGuard guard; baddbmm_out_cuda_impl(result, *self_, batch1, batch2, beta, alpha); } namedinference::propagate_names_if_nonempty( result, namedinference::compute_baddbmm_outnames(result, batch1, batch2, self)); return result; } Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha) { Tensor out = at::empty({0}, self.options()); return baddbmm_out_cuda(self, batch1, batch2, beta, alpha, out); } Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, const Scalar& beta, const Scalar& alpha) { return baddbmm_out_cuda(self, batch1, batch2, beta, alpha, self); } Tensor& bmm_out_cuda(const Tensor& batch1, const Tensor& batch2, Tensor &result) { result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) }); Scalar beta(0.0); Scalar alpha(1.0); { NoNamesGuard guard; baddbmm_out_cuda_impl(result, result, batch1, batch2, beta, alpha); } namedinference::propagate_names_if_nonempty( result, namedinference::compute_bmm_outnames(result, batch1, batch2)); return result; } Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) { Tensor result = at::empty({0}, self.options()); return native::bmm_out_cuda(self, mat2, result); } namespace { inline void dot_check(const Tensor& self, const Tensor& other) { TORCH_CHECK( self.dim() == 1 && other.dim() == 1, "1D tensors expected, but got ", self.dim(), "D and ", other.dim(), "D tensors"); TORCH_CHECK( self.scalar_type() == other.scalar_type(), "dot : expected both vectors to have same dtype, but found ", self.scalar_type(), " and ", other.scalar_type()); TORCH_CHECK( self.numel() == other.numel(), "inconsistent tensor size, expected tensor [", self.numel(), "] and src [", other.numel(), "] to have the same number of elements, but got ", self.numel(), " and ", other.numel(), " elements 
respectively"); TORCH_CHECK( self.device() == other.device(), "expected all tensors to be on the same device. Found: ", self.device(), ", ", other.device()); TORCH_CHECK( (self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) && (other.stride(0) <= INT_MAX), "dot only supports n, incx, incy with the bound [val] <= %d", INT_MAX); } } // anonymous namespace Tensor dot_cuda(const Tensor& self, const Tensor& other) { at::NoNamesGuard guard; dot_check(self, other); const int n = static_cast<int>(self.numel()); int incx = static_cast<int>(self.stride(0)); int incy = static_cast<int>(other.stride(0)); if (n == 1) { incx = 1; incy = 1; } return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] { Tensor result = at::empty({}, self.options()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::PointerModeGuard pointerModeGuard(handle, CUBLAS_POINTER_MODE_DEVICE); at::cuda::blas::dot<scalar_t>( handle, n, self.data_ptr<scalar_t>(), incx, other.data_ptr<scalar_t>(), incy, result.data_ptr<scalar_t>()); return result; }); } Tensor vdot_cuda(const Tensor& self, const Tensor& other) { if (!self.is_complex()) { return dot_cuda(self, other); } at::NoNamesGuard guard; dot_check(self, other); const int n = static_cast<int>(self.numel()); int incx = static_cast<int>(self.stride(0)); int incy = static_cast<int>(other.stride(0)); if (n == 1) { incx = 1; incy = 1; } return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] { Tensor result = at::empty({}, self.options()); auto handle = at::cuda::getCurrentCUDABlasHandle(); at::cuda::blas::PointerModeGuard pointerModeGuard( handle, CUBLAS_POINTER_MODE_DEVICE); at::cuda::blas::vdot<scalar_t>( handle, n, self.data_ptr<scalar_t>(), incx, other.data_ptr<scalar_t>(), incy, result.data_ptr<scalar_t>()); return result; }); } namespace { void addr_kernel_cuda(TensorIterator &iter, const Scalar& beta, const Scalar& alpha) { if (iter.dtype() == ScalarType::Bool) { using scalar_t = bool; auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); // when beta is false, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == false) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val && vec1_val && vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); } ); } return; } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, iter.dtype(), "addr_cuda", [&] { auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); scalar_t zero_val(0); // when beta==0, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == zero_val) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val * vec1_val * vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return beta_val * self_val + alpha_val * vec1_val * vec2_val; } ); } }); } // This reduction accumulates results as the type `acc_t`. By default, when // `scalar_t` is complex, `acc_t` is the downgraded real number type. // Otherwise, `acc_t` and `scalar_t` are the same type. 
template <typename scalar_t, typename acc_t=typename scalar_value_type<scalar_t>::type, typename out_t=typename scalar_value_type<scalar_t>::type> void linalg_vector_norm_kernel_cuda_impl(TensorIterator& iter, Scalar ord) { double ord_val; if (ord.isFloatingPoint()) { ord_val = ord.to<double>(); } else { TORCH_CHECK(false, "linalg.vector_norm expects ord to be float"); } if (iter.numel() == 0) { iter.output().fill_((ord_val < 0) ? INFINITY : 0); return; } if (ord_val == 0) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<scalar_t, acc_t>(), 0); } else if (ord_val == 1) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<scalar_t, acc_t>(), 0); } else if (ord_val == 2) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<scalar_t, acc_t>(), 0); } else if (ord_val == INFINITY) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<scalar_t, acc_t>(), 0); } else if (ord_val == -INFINITY) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<scalar_t, acc_t>(), std::numeric_limits<acc_t>::infinity()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<scalar_t, acc_t>{ static_cast<acc_t>(ord_val) }, 0); } // For complex outputs, the above kernels do not touch the imaginary values, // so we must zero them out if (isComplexType(iter.output().scalar_type())) { at::imag(iter.output()).zero_(); } } static void linalg_vector_norm_kernel_cuda(TensorIterator& iter, Scalar ord) { if (iter.output().scalar_type() == kHalf) { return linalg_vector_norm_kernel_cuda_impl<at::Half, float>(iter, ord); } else if (iter.input_dtype() == kHalf && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return linalg_vector_norm_kernel_cuda_impl<at::Half, float, float>(iter, ord); } else if(iter.output().scalar_type() == kBFloat16) { return linalg_vector_norm_kernel_cuda_impl<at::BFloat16, float>(iter, ord); } else if (iter.input_dtype() == kBFloat16 && iter.output().scalar_type() == kFloat) { // type promotion that does cast and reduction in a single kernel return linalg_vector_norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, ord); } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "linalg_vector_norm_cuda", [&] { linalg_vector_norm_kernel_cuda_impl<scalar_t>(iter, ord); }); } } // anonymous namespace REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda); REGISTER_DISPATCH(linalg_vector_norm_stub, &linalg_vector_norm_kernel_cuda); }}
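The baddbmm_out_cuda_impl above massages strides and transposes so that a single strided-batched GEMM can produce result = beta * self + alpha * bmm(batch1, batch2). As a reference for what that call has to compute, here is a deliberately naive CUDA kernel that states the same contract directly for contiguous row-major tensors; the kernel name and the one-thread-per-output launch are illustrative assumptions, not part of the ATen implementation.

// Naive reference for baddbmm semantics on contiguous row-major tensors:
//   out[b] = beta * self[b] + alpha * (batch1[b] x batch2[b])
// with shapes self/out: [B, M, N], batch1: [B, M, K], batch2: [B, K, N].
__global__ void baddbmm_reference(const float* self, const float* batch1, const float* batch2,
                                  float* out, int B, int M, int N, int K,
                                  float beta, float alpha)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per output element
    if (idx >= B * M * N)
        return;
    int b = idx / (M * N);
    int m = (idx / N) % M;
    int n = idx % N;
    float acc = 0.0f;
    for (int k = 0; k < K; ++k)
        acc += batch1[(b * M + m) * K + k] * batch2[(b * K + k) * N + n];
    out[idx] = beta * self[idx] + alpha * acc;
}

A launch such as baddbmm_reference<<<(B * M * N + 255) / 256, 256>>>(self, b1, b2, out, B, M, N, K, beta, alpha) covers every output element once; the real path instead hands the whole batch to at::cuda::blas::bgemm.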
b3b18b335ae8f024d83b6b56b50ae8d43e203876.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2014 Jure Ratkovic */ #include <Trayc/CUDAfiles/phong.h> rtDeclareVariable(float3, shading_normal, attribute shading_normal, ); rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, ); // // Transparent object shadows, no textures // rtDeclareVariable(float3, shadow_attenuation, , ); RT_PROGRAM void any_hit_glass() { const float3 world_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float nDi = fabs(dot(world_normal, ray.direction)); prd_shadow.attenuation *= 1.0f - optix::fresnel_schlick(nDi, 5.0f, 1.0f - shadow_attenuation, make_float3(1.0f)).x; rtIgnoreIntersection(); } // // Glass shader, no textures // rtDeclareVariable(float3, cutoff_color, , ); rtDeclareVariable(float, fresnel_exponent, , ); rtDeclareVariable(float, fresnel_minimum, , ); rtDeclareVariable(float, fresnel_maximum, , ); rtDeclareVariable(float, refraction_index, , ); rtDeclareVariable(float3, refraction_color, , ); rtDeclareVariable(float3, reflection_color, , ); rtDeclareVariable(float3, extinction_constant, , ); RT_PROGRAM void closest_hit_glass() { const float3 h = ray.origin + t_hit * ray.direction; const float3 n = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); // normal const float3 i = ray.direction;// incident direction float reflection = 1.0f; float3 result = make_float3(0.0f); const float3 beer_attenuation = dot(n, ray.direction) > 0.0f ? exp(extinction_constant * t_hit) : make_float3(1.0f); if(prd_radiance.depth < max_depth) { float3 t; if(refract(t, i, n, refraction_index)) { // check for external or internal reflection float cos_theta = dot(i, n); if(cos_theta < 0.0f) cos_theta = -cos_theta; else cos_theta = dot(t, n); reflection = fresnel_schlick(cos_theta, fresnel_exponent, fresnel_minimum, fresnel_maximum); const float importance = prd_radiance.importance * (1.0f - reflection) * optix::luminance(refraction_color * beer_attenuation); if(importance > importance_cutoff) { const optix::Ray ray(h, t, radiance_ray_type, scene_epsilon); PerRayData_radiance refr_prd; refr_prd.depth = prd_radiance.depth+1; refr_prd.importance = importance; rtTrace(top_object, ray, refr_prd); result += (1.0f - reflection) * refraction_color * refr_prd.result; } else result += (1.0f - reflection) * refraction_color * cutoff_color; } const float3 r = reflect(i, n); const float importance = prd_radiance.importance * reflection * optix::luminance(reflection_color * beer_attenuation); if(importance > importance_cutoff) { const optix::Ray ray(h, r, radiance_ray_type, scene_epsilon); PerRayData_radiance refl_prd; refl_prd.depth = prd_radiance.depth + 1; refl_prd.importance = importance; rtTrace(top_object, ray, refl_prd); result += reflection * reflection_color * refl_prd.result; } else result += reflection * reflection_color * cutoff_color; } prd_radiance.result = result * beer_attenuation; } rtDeclareVariable(float3, Ka, , ); rtDeclareVariable(float3, Kd, , ); rtDeclareVariable(float3, Ks, , ); rtDeclareVariable(float, phong_exponent, , ); rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> diffuse_map; rtTextureSampler<uchar4, 2, hipReadModeNormalizedFloat> specular_map; rtDeclareVariable(float3, texcoord, attribute texcoord, ); // //solid mesh with textures // RT_PROGRAM void closest_hit_solid() { const float4 pKd = tex2D(diffuse_map, texcoord.x, texcoord.y); if(prd_radiance.depth < max_depth && pKd.w < importance_cutoff) { const optix::Ray newray(ray.origin + t_hit * ray.direction, 
ray.direction, radiance_ray_type, scene_epsilon); prd_radiance.depth++; rtTrace(top_object, newray, prd_radiance); return; } const float3 world_shading_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float3 world_geometric_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, geometric_normal)); const float3 ffnormal = faceforward(world_shading_normal, -ray.direction, world_geometric_normal); const float4 pKs = tex2D(specular_map, texcoord.x, texcoord.y); const float3 hit_point = ray.origin + t_hit * ray.direction; const uint2 &screen = output_buffer.size(); const uint2 newLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel); unsigned int seed = tea<1>(screen.x * newLaunchIndex.y + newLaunchIndex.x, rnd_seed); const float occlusion = ambientOcclusion(hit_point, ffnormal, seed); //phongShade(ffnormal, make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); //phongShade(make_float3(abs(ffnormal.x), abs(ffnormal.y), abs(ffnormal.z)), make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); phongShade(hit_point, make_float3(pKd) * Ka, make_float3(pKd) * Kd, make_float3(pKs) * Ks, ffnormal, pKs.w * 255.0f * phong_exponent, seed); prd_radiance.result *= occlusion; } rtDeclareVariable(float3, reflectivity, , ); // //solid mesh with textures and reflectivity // RT_PROGRAM void closest_hit_reflective() { const float4 pKd = tex2D(diffuse_map, texcoord.x, texcoord.y); if(prd_radiance.depth < max_depth && pKd.w < importance_cutoff) { const optix::Ray newray(ray.origin + t_hit * ray.direction, ray.direction, radiance_ray_type, scene_epsilon); prd_radiance.depth++; rtTrace(top_object, newray, prd_radiance); return; } const float3 world_shading_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float3 world_geometric_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, geometric_normal)); const float3 ffnormal = faceforward(world_shading_normal, -ray.direction, world_geometric_normal); const float3 hit_point = ray.origin + t_hit * ray.direction; const uint2 &screen = output_buffer.size(); const uint2 newLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel); unsigned int seed = tea<1>(screen.x * newLaunchIndex.y + newLaunchIndex.x, rnd_seed); const float occlusion = ambientOcclusion(hit_point, ffnormal, seed); const float4 pKs = tex2D(specular_map, texcoord.x, texcoord.y); phongShade(hit_point, make_float3(pKd) * Ka, make_float3(pKd) * Kd, make_float3(pKs) * Ks, ffnormal, pKs.w * 255.0f, seed); phongReflect(hit_point, ffnormal, reflectivity); prd_radiance.result *= occlusion; } // // Terminates and fully attenuates ray after any hit if not opaque // RT_PROGRAM void any_hit_solid() { const float opacity = tex2D(diffuse_map, texcoord.x, texcoord.y).w; if(opacity < importance_cutoff) rtIgnoreIntersection(); phongShadowed(); } rtDeclareVariable(float, light_brightness, attribute light_brightness, ); rtDeclareVariable(float3, light_color, , ); // // Light with bloom effect // RT_PROGRAM void closest_hit_light() { if(light_brightness >= 1.0f) { prd_radiance.result = light_color; return; } const float3 hit_point = ray.origin + t_hit * ray.direction; const optix::Ray ray(hit_point, ray.direction, radiance_ray_type, scene_epsilon); PerRayData_radiance new_prd; new_prd.depth = prd_radiance.depth; new_prd.importance = prd_radiance.importance; rtTrace(top_object, ray, 
new_prd); prd_radiance.result = new_prd.result + light_color * light_brightness; } // // Ignores intersection // RT_PROGRAM void any_hit_light() { rtIgnoreIntersection(); }
b3b18b335ae8f024d83b6b56b50ae8d43e203876.cu
/* * Copyright (c) 2014 Jure Ratkovic */ #include <Trayc/CUDAfiles/phong.h> rtDeclareVariable(float3, shading_normal, attribute shading_normal, ); rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, ); // // Transparent object shadows, no textures // rtDeclareVariable(float3, shadow_attenuation, , ); RT_PROGRAM void any_hit_glass() { const float3 world_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float nDi = fabs(dot(world_normal, ray.direction)); prd_shadow.attenuation *= 1.0f - optix::fresnel_schlick(nDi, 5.0f, 1.0f - shadow_attenuation, make_float3(1.0f)).x; rtIgnoreIntersection(); } // // Glass shader, no textures // rtDeclareVariable(float3, cutoff_color, , ); rtDeclareVariable(float, fresnel_exponent, , ); rtDeclareVariable(float, fresnel_minimum, , ); rtDeclareVariable(float, fresnel_maximum, , ); rtDeclareVariable(float, refraction_index, , ); rtDeclareVariable(float3, refraction_color, , ); rtDeclareVariable(float3, reflection_color, , ); rtDeclareVariable(float3, extinction_constant, , ); RT_PROGRAM void closest_hit_glass() { const float3 h = ray.origin + t_hit * ray.direction; const float3 n = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); // normal const float3 i = ray.direction;// incident direction float reflection = 1.0f; float3 result = make_float3(0.0f); const float3 beer_attenuation = dot(n, ray.direction) > 0.0f ? exp(extinction_constant * t_hit) : make_float3(1.0f); if(prd_radiance.depth < max_depth) { float3 t; if(refract(t, i, n, refraction_index)) { // check for external or internal reflection float cos_theta = dot(i, n); if(cos_theta < 0.0f) cos_theta = -cos_theta; else cos_theta = dot(t, n); reflection = fresnel_schlick(cos_theta, fresnel_exponent, fresnel_minimum, fresnel_maximum); const float importance = prd_radiance.importance * (1.0f - reflection) * optix::luminance(refraction_color * beer_attenuation); if(importance > importance_cutoff) { const optix::Ray ray(h, t, radiance_ray_type, scene_epsilon); PerRayData_radiance refr_prd; refr_prd.depth = prd_radiance.depth+1; refr_prd.importance = importance; rtTrace(top_object, ray, refr_prd); result += (1.0f - reflection) * refraction_color * refr_prd.result; } else result += (1.0f - reflection) * refraction_color * cutoff_color; } const float3 r = reflect(i, n); const float importance = prd_radiance.importance * reflection * optix::luminance(reflection_color * beer_attenuation); if(importance > importance_cutoff) { const optix::Ray ray(h, r, radiance_ray_type, scene_epsilon); PerRayData_radiance refl_prd; refl_prd.depth = prd_radiance.depth + 1; refl_prd.importance = importance; rtTrace(top_object, ray, refl_prd); result += reflection * reflection_color * refl_prd.result; } else result += reflection * reflection_color * cutoff_color; } prd_radiance.result = result * beer_attenuation; } rtDeclareVariable(float3, Ka, , ); rtDeclareVariable(float3, Kd, , ); rtDeclareVariable(float3, Ks, , ); rtDeclareVariable(float, phong_exponent, , ); rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> diffuse_map; rtTextureSampler<uchar4, 2, cudaReadModeNormalizedFloat> specular_map; rtDeclareVariable(float3, texcoord, attribute texcoord, ); // //solid mesh with textures // RT_PROGRAM void closest_hit_solid() { const float4 pKd = tex2D(diffuse_map, texcoord.x, texcoord.y); if(prd_radiance.depth < max_depth && pKd.w < importance_cutoff) { const optix::Ray newray(ray.origin + t_hit * ray.direction, ray.direction, radiance_ray_type, scene_epsilon); 
prd_radiance.depth++; rtTrace(top_object, newray, prd_radiance); return; } const float3 world_shading_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float3 world_geometric_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, geometric_normal)); const float3 ffnormal = faceforward(world_shading_normal, -ray.direction, world_geometric_normal); const float4 pKs = tex2D(specular_map, texcoord.x, texcoord.y); const float3 hit_point = ray.origin + t_hit * ray.direction; const uint2 &screen = output_buffer.size(); const uint2 newLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel); unsigned int seed = tea<1>(screen.x * newLaunchIndex.y + newLaunchIndex.x, rnd_seed); const float occlusion = ambientOcclusion(hit_point, ffnormal, seed); //phongShade(ffnormal, make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); //phongShade(make_float3(abs(ffnormal.x), abs(ffnormal.y), abs(ffnormal.z)), make_float3(0.0f), make_float3(0.0f), make_float3(0.0f), phong_exp, reflectivity); phongShade(hit_point, make_float3(pKd) * Ka, make_float3(pKd) * Kd, make_float3(pKs) * Ks, ffnormal, pKs.w * 255.0f * phong_exponent, seed); prd_radiance.result *= occlusion; } rtDeclareVariable(float3, reflectivity, , ); // //solid mesh with textures and reflectivity // RT_PROGRAM void closest_hit_reflective() { const float4 pKd = tex2D(diffuse_map, texcoord.x, texcoord.y); if(prd_radiance.depth < max_depth && pKd.w < importance_cutoff) { const optix::Ray newray(ray.origin + t_hit * ray.direction, ray.direction, radiance_ray_type, scene_epsilon); prd_radiance.depth++; rtTrace(top_object, newray, prd_radiance); return; } const float3 world_shading_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, shading_normal)); const float3 world_geometric_normal = normalize(rtTransformNormal(RT_OBJECT_TO_WORLD, geometric_normal)); const float3 ffnormal = faceforward(world_shading_normal, -ray.direction, world_geometric_normal); const float3 hit_point = ray.origin + t_hit * ray.direction; const uint2 &screen = output_buffer.size(); const uint2 newLaunchIndex = make_uint2(launch_index.x, launch_index.y + myStripe * output_buffer.size().y / renderingDivisionLevel); unsigned int seed = tea<1>(screen.x * newLaunchIndex.y + newLaunchIndex.x, rnd_seed); const float occlusion = ambientOcclusion(hit_point, ffnormal, seed); const float4 pKs = tex2D(specular_map, texcoord.x, texcoord.y); phongShade(hit_point, make_float3(pKd) * Ka, make_float3(pKd) * Kd, make_float3(pKs) * Ks, ffnormal, pKs.w * 255.0f, seed); phongReflect(hit_point, ffnormal, reflectivity); prd_radiance.result *= occlusion; } // // Terminates and fully attenuates ray after any hit if not opaque // RT_PROGRAM void any_hit_solid() { const float opacity = tex2D(diffuse_map, texcoord.x, texcoord.y).w; if(opacity < importance_cutoff) rtIgnoreIntersection(); phongShadowed(); } rtDeclareVariable(float, light_brightness, attribute light_brightness, ); rtDeclareVariable(float3, light_color, , ); // // Light with bloom effect // RT_PROGRAM void closest_hit_light() { if(light_brightness >= 1.0f) { prd_radiance.result = light_color; return; } const float3 hit_point = ray.origin + t_hit * ray.direction; const optix::Ray ray(hit_point, ray.direction, radiance_ray_type, scene_epsilon); PerRayData_radiance new_prd; new_prd.depth = prd_radiance.depth; new_prd.importance = prd_radiance.importance; rtTrace(top_object, ray, new_prd); prd_radiance.result = new_prd.result + 
light_color * light_brightness; } // // Ignores intersection // RT_PROGRAM void any_hit_light() { rtIgnoreIntersection(); }
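closest_hit_glass above weighs reflection against refraction with optix::fresnel_schlick, whose body is not shown in this file. Assuming it follows the standard Schlick approximation its arguments suggest, F(theta) = F_min + (F_max - F_min) * (1 - cos(theta))^exponent, a stand-in device helper would look like the sketch below (hypothetical name; not the OptiX SDK code).

// Schlick's approximation of Fresnel reflectance:
//   F(theta) = F_min + (F_max - F_min) * (1 - cos(theta))^exponent
// (assumed behaviour of optix::fresnel_schlick above; illustration only).
__device__ __forceinline__ float schlick_fresnel(float cos_theta, float exponent,
                                                 float fresnel_min, float fresnel_max)
{
    float m = fminf(fmaxf(1.0f - cos_theta, 0.0f), 1.0f);   // clamp to [0, 1]
    return fresnel_min + (fresnel_max - fresnel_min) * powf(m, exponent);
}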
9e7c9d9078941e58b86edb70c548df1b0738b0b6.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/dim4.hpp> #include <af/defines.h> #include <ArrayInfo.hpp> #include <Array.hpp> #include <copy.hpp> #include <fft.hpp> #include <err_cuda.hpp> #include <hipfft.h> #include <hip/hip_complex.h> #include <string> #include <cstdio> using af::dim4; using std::string; namespace cuda { // cuFFTPlanner will do very basic plan caching. // it looks for required candidate in mHandles array and returns if found one. // otherwise, it will create a plan and set it at the mAvailSlotIndex and increment // the slot index variable in ciruclar fashion 0 to MAX_PLAN_CACHE, then back to zero and repeat. class cuFFTPlanner { friend void find_cufft_plan(hipfftHandle &plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, hipfftType type, int batch); public: static cuFFTPlanner& getInstance() { static cuFFTPlanner single_instance; return single_instance; } private: cuFFTPlanner() : mAvailSlotIndex(0) {} cuFFTPlanner(cuFFTPlanner const&); void operator=(cuFFTPlanner const&); static const int MAX_PLAN_CACHE = 5; int mAvailSlotIndex; hipfftHandle mHandles[MAX_PLAN_CACHE]; string mKeys[MAX_PLAN_CACHE]; }; void find_cufft_plan(hipfftHandle &plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, hipfftType type, int batch) { cuFFTPlanner &planner = cuFFTPlanner::getInstance(); // create the key string char key_str_temp[64]; sprintf(key_str_temp, "%d:", rank); string key_string(key_str_temp); for(int r=0; r<rank; ++r) { sprintf(key_str_temp, "%d:", n[r]); key_string.append(std::string(key_str_temp)); } if (inembed!=NULL) { for(int r=0; r<rank; ++r) { sprintf(key_str_temp, "%d:", inembed[r]); key_string.append(std::string(key_str_temp)); } sprintf(key_str_temp, "%d:%d:", istride, idist); key_string.append(std::string(key_str_temp)); } if (onembed!=NULL) { for(int r=0; r<rank; ++r) { sprintf(key_str_temp, "%d:", onembed[r]); key_string.append(std::string(key_str_temp)); } sprintf(key_str_temp, "%d:%d:", ostride, odist); key_string.append(std::string(key_str_temp)); } sprintf(key_str_temp, "%d:%d", (int)type, batch); key_string.append(std::string(key_str_temp)); // find the matching plan_index in the array cuFFTPlanner::mKeys int plan_index = -1; for (int i=0; i<cuFFTPlanner::MAX_PLAN_CACHE; ++i) { if (key_string==planner.mKeys[i]) { plan_index = i; break; } } // return mHandles[plan_index] if plan_index valid if (plan_index!=-1) { plan = planner.mHandles[plan_index]; return; } // otherwise create a new plan and set it at mAvailSlotIndex // and finally set it to output plan variable int slot_index = planner.mAvailSlotIndex; hipfftResult res= hipfftDestroy(planner.mHandles[slot_index]); if (res==HIPFFT_SUCCESS || HIPFFT_INVALID_PLAN) { hipfftHandle temp; hipfftResult res = hipfftPlanMany(&temp, rank, n, inembed, istride, idist, onembed, ostride, odist, type, batch); switch(res) { case HIPFFT_ALLOC_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT GPU resource allocation failed" , AF_ERR_INTERNAL); case HIPFFT_INVALID_VALUE : AF_ERROR("cuFFTPlanMany: invalid parameters passed to cuFFT API" , AF_ERR_INTERNAL); case HIPFFT_INTERNAL_ERROR: 
AF_ERROR("cuFFTPlanMany: internal driver detected using cuFFT" , AF_ERR_INTERNAL); case HIPFFT_SETUP_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT library initialization failed" , AF_ERR_INTERNAL); case HIPFFT_INVALID_SIZE : AF_ERROR("cuFFTPlanMany: invalid size parameters passed to cuFFT", AF_ERR_INTERNAL); default: //HIPFFT_SUCCESS { plan = temp; planner.mHandles[slot_index] = temp; planner.mKeys[slot_index] = key_string; planner.mAvailSlotIndex = (slot_index + 1)%cuFFTPlanner::MAX_PLAN_CACHE; } break; } } else AF_ERROR("cuFFTDestroy call failed", AF_ERR_INTERNAL); } template<typename T> struct cufft_transform; #define CUFFT_FUNC(T, TRANSFORM_TYPE) \ template<> \ struct cufft_transform<T> \ { \ enum { type = CUFFT_##TRANSFORM_TYPE }; \ hipfftResult \ operator() (hipfftHandle plan, T *in, T *out, int dir) { \ return cufftExec##TRANSFORM_TYPE(plan, in, out, dir); \ } \ }; CUFFT_FUNC(cfloat , C2C) CUFFT_FUNC(cdouble, Z2Z) template<int rank> void computeDims(int *rdims, const dim4 &idims) { if (rank==3) { rdims[0] = idims[2]; rdims[1] = idims[1]; rdims[2] = idims[0]; } else if(rank==2) { rdims[0] = idims[1]; rdims[1] = idims[0]; } else { rdims[0] = idims[0]; } } template<typename T, int rank, int direction> void cufft_common(Array<T> &arr) { const dim4 dims = arr.dims(); const dim4 strides = arr.strides(); int rank_dims[3]; switch(rank) { case 1: computeDims<1>(rank_dims, dims); break; case 2: computeDims<2>(rank_dims, dims); break; case 3: computeDims<3>(rank_dims, dims); break; } hipfftHandle plan; find_cufft_plan(plan, rank, rank_dims, NULL, strides[0], strides[rank], NULL, strides[0], strides[rank], (hipfftType)cufft_transform<T>::type, dims[rank]); cufft_transform<T> transform; transform(plan, arr.get(), arr.get(), direction); } template<int rank> void computePaddedDims(dim4 &pdims, dim_type const * const pad) { if (rank==1) { pdims[0] = pad[0]; } else if (rank==2) { pdims[0] = pad[0]; pdims[1] = pad[1]; } else if (rank==3) { pdims[0] = pad[0]; pdims[1] = pad[1]; pdims[2] = pad[2]; } } template<typename T> T zero() { return 0; } template<> cfloat zero<cfloat>() { return make_cuFloatComplex(0.0f, 0.0f); } template<> cdouble zero<cdouble>() { return make_cuDoubleComplex(0.0, 0.0); } template<typename inType, typename outType, int rank, bool isR2C> Array<outType> fft(Array<inType> const &in, double normalize, dim_type const npad, dim_type const * const pad) { ARG_ASSERT(1, (in.isOwner()==true)); dim4 pdims(1); switch(rank) { case 1 : computePaddedDims<1>(pdims, pad); break; case 2 : computePaddedDims<2>(pdims, pad); break; case 3 : computePaddedDims<3>(pdims, pad); break; default: AF_ERROR("invalid rank", AF_ERR_SIZE); } pdims[rank] = in.dims()[rank]; Array<outType> ret = padArray<inType, outType>(in, (npad>0 ? pdims : in.dims()), zero<outType>(), normalize); cufft_common<outType, rank, HIPFFT_FORWARD>(ret); return ret; } template<typename T, int rank> Array<T> ifft(Array<T> const &in, double normalize, dim_type const npad, dim_type const * const pad) { ARG_ASSERT(1, (in.isOwner()==true)); dim4 pdims(1); switch(rank) { case 1 : computePaddedDims<1>(pdims, pad); break; case 2 : computePaddedDims<2>(pdims, pad); break; case 3 : computePaddedDims<3>(pdims, pad); break; default: AF_ERROR("invalid rank", AF_ERR_SIZE); } pdims[rank] = in.dims()[rank]; Array<T> ret = padArray<T, T>(in, (npad>0 ? 
pdims : in.dims()), zero<T>(), normalize); cufft_common<T, rank, HIPFFT_BACKWARD>(ret); return ret; } #define INSTANTIATE1(T1, T2)\ template Array<T2> fft <T1, T2, 1, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T2> fft <T1, T2, 2, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T2> fft <T1, T2, 3, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); INSTANTIATE1(float , cfloat ) INSTANTIATE1(double , cdouble) #define INSTANTIATE2(T)\ template Array<T> fft <T, T, 1, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> fft <T, T, 2, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> fft <T, T, 3, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> ifft<T, 1>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> ifft<T, 2>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> ifft<T, 3>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); INSTANTIATE2(cfloat ) INSTANTIATE2(cdouble) }
9e7c9d9078941e58b86edb70c548df1b0738b0b6.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/dim4.hpp> #include <af/defines.h> #include <ArrayInfo.hpp> #include <Array.hpp> #include <copy.hpp> #include <fft.hpp> #include <err_cuda.hpp> #include <cufft.h> #include <cuComplex.h> #include <string> #include <cstdio> using af::dim4; using std::string; namespace cuda { // cuFFTPlanner will do very basic plan caching. // it looks for required candidate in mHandles array and returns if found one. // otherwise, it will create a plan and set it at the mAvailSlotIndex and increment // the slot index variable in ciruclar fashion 0 to MAX_PLAN_CACHE, then back to zero and repeat. class cuFFTPlanner { friend void find_cufft_plan(cufftHandle &plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, cufftType type, int batch); public: static cuFFTPlanner& getInstance() { static cuFFTPlanner single_instance; return single_instance; } private: cuFFTPlanner() : mAvailSlotIndex(0) {} cuFFTPlanner(cuFFTPlanner const&); void operator=(cuFFTPlanner const&); static const int MAX_PLAN_CACHE = 5; int mAvailSlotIndex; cufftHandle mHandles[MAX_PLAN_CACHE]; string mKeys[MAX_PLAN_CACHE]; }; void find_cufft_plan(cufftHandle &plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, cufftType type, int batch) { cuFFTPlanner &planner = cuFFTPlanner::getInstance(); // create the key string char key_str_temp[64]; sprintf(key_str_temp, "%d:", rank); string key_string(key_str_temp); for(int r=0; r<rank; ++r) { sprintf(key_str_temp, "%d:", n[r]); key_string.append(std::string(key_str_temp)); } if (inembed!=NULL) { for(int r=0; r<rank; ++r) { sprintf(key_str_temp, "%d:", inembed[r]); key_string.append(std::string(key_str_temp)); } sprintf(key_str_temp, "%d:%d:", istride, idist); key_string.append(std::string(key_str_temp)); } if (onembed!=NULL) { for(int r=0; r<rank; ++r) { sprintf(key_str_temp, "%d:", onembed[r]); key_string.append(std::string(key_str_temp)); } sprintf(key_str_temp, "%d:%d:", ostride, odist); key_string.append(std::string(key_str_temp)); } sprintf(key_str_temp, "%d:%d", (int)type, batch); key_string.append(std::string(key_str_temp)); // find the matching plan_index in the array cuFFTPlanner::mKeys int plan_index = -1; for (int i=0; i<cuFFTPlanner::MAX_PLAN_CACHE; ++i) { if (key_string==planner.mKeys[i]) { plan_index = i; break; } } // return mHandles[plan_index] if plan_index valid if (plan_index!=-1) { plan = planner.mHandles[plan_index]; return; } // otherwise create a new plan and set it at mAvailSlotIndex // and finally set it to output plan variable int slot_index = planner.mAvailSlotIndex; cufftResult res= cufftDestroy(planner.mHandles[slot_index]); if (res==CUFFT_SUCCESS || CUFFT_INVALID_PLAN) { cufftHandle temp; cufftResult res = cufftPlanMany(&temp, rank, n, inembed, istride, idist, onembed, ostride, odist, type, batch); switch(res) { case CUFFT_ALLOC_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT GPU resource allocation failed" , AF_ERR_INTERNAL); case CUFFT_INVALID_VALUE : AF_ERROR("cuFFTPlanMany: invalid parameters passed to cuFFT API" , AF_ERR_INTERNAL); case CUFFT_INTERNAL_ERROR: AF_ERROR("cuFFTPlanMany: internal driver detected using cuFFT" , AF_ERR_INTERNAL); case 
CUFFT_SETUP_FAILED : AF_ERROR("cuFFTPlanMany: cuFFT library initialization failed" , AF_ERR_INTERNAL); case CUFFT_INVALID_SIZE : AF_ERROR("cuFFTPlanMany: invalid size parameters passed to cuFFT", AF_ERR_INTERNAL); default: //CUFFT_SUCCESS { plan = temp; planner.mHandles[slot_index] = temp; planner.mKeys[slot_index] = key_string; planner.mAvailSlotIndex = (slot_index + 1)%cuFFTPlanner::MAX_PLAN_CACHE; } break; } } else AF_ERROR("cuFFTDestroy call failed", AF_ERR_INTERNAL); } template<typename T> struct cufft_transform; #define CUFFT_FUNC(T, TRANSFORM_TYPE) \ template<> \ struct cufft_transform<T> \ { \ enum { type = CUFFT_##TRANSFORM_TYPE }; \ cufftResult \ operator() (cufftHandle plan, T *in, T *out, int dir) { \ return cufftExec##TRANSFORM_TYPE(plan, in, out, dir); \ } \ }; CUFFT_FUNC(cfloat , C2C) CUFFT_FUNC(cdouble, Z2Z) template<int rank> void computeDims(int *rdims, const dim4 &idims) { if (rank==3) { rdims[0] = idims[2]; rdims[1] = idims[1]; rdims[2] = idims[0]; } else if(rank==2) { rdims[0] = idims[1]; rdims[1] = idims[0]; } else { rdims[0] = idims[0]; } } template<typename T, int rank, int direction> void cufft_common(Array<T> &arr) { const dim4 dims = arr.dims(); const dim4 strides = arr.strides(); int rank_dims[3]; switch(rank) { case 1: computeDims<1>(rank_dims, dims); break; case 2: computeDims<2>(rank_dims, dims); break; case 3: computeDims<3>(rank_dims, dims); break; } cufftHandle plan; find_cufft_plan(plan, rank, rank_dims, NULL, strides[0], strides[rank], NULL, strides[0], strides[rank], (cufftType)cufft_transform<T>::type, dims[rank]); cufft_transform<T> transform; transform(plan, arr.get(), arr.get(), direction); } template<int rank> void computePaddedDims(dim4 &pdims, dim_type const * const pad) { if (rank==1) { pdims[0] = pad[0]; } else if (rank==2) { pdims[0] = pad[0]; pdims[1] = pad[1]; } else if (rank==3) { pdims[0] = pad[0]; pdims[1] = pad[1]; pdims[2] = pad[2]; } } template<typename T> T zero() { return 0; } template<> cfloat zero<cfloat>() { return make_cuFloatComplex(0.0f, 0.0f); } template<> cdouble zero<cdouble>() { return make_cuDoubleComplex(0.0, 0.0); } template<typename inType, typename outType, int rank, bool isR2C> Array<outType> fft(Array<inType> const &in, double normalize, dim_type const npad, dim_type const * const pad) { ARG_ASSERT(1, (in.isOwner()==true)); dim4 pdims(1); switch(rank) { case 1 : computePaddedDims<1>(pdims, pad); break; case 2 : computePaddedDims<2>(pdims, pad); break; case 3 : computePaddedDims<3>(pdims, pad); break; default: AF_ERROR("invalid rank", AF_ERR_SIZE); } pdims[rank] = in.dims()[rank]; Array<outType> ret = padArray<inType, outType>(in, (npad>0 ? pdims : in.dims()), zero<outType>(), normalize); cufft_common<outType, rank, CUFFT_FORWARD>(ret); return ret; } template<typename T, int rank> Array<T> ifft(Array<T> const &in, double normalize, dim_type const npad, dim_type const * const pad) { ARG_ASSERT(1, (in.isOwner()==true)); dim4 pdims(1); switch(rank) { case 1 : computePaddedDims<1>(pdims, pad); break; case 2 : computePaddedDims<2>(pdims, pad); break; case 3 : computePaddedDims<3>(pdims, pad); break; default: AF_ERROR("invalid rank", AF_ERR_SIZE); } pdims[rank] = in.dims()[rank]; Array<T> ret = padArray<T, T>(in, (npad>0 ? 
pdims : in.dims()), zero<T>(), normalize); cufft_common<T, rank, CUFFT_INVERSE>(ret); return ret; } #define INSTANTIATE1(T1, T2)\ template Array<T2> fft <T1, T2, 1, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T2> fft <T1, T2, 2, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T2> fft <T1, T2, 3, true >(const Array<T1> &in, double normalize, dim_type const npad, dim_type const * const pad); INSTANTIATE1(float , cfloat ) INSTANTIATE1(double , cdouble) #define INSTANTIATE2(T)\ template Array<T> fft <T, T, 1, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> fft <T, T, 2, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> fft <T, T, 3, false>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> ifft<T, 1>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> ifft<T, 2>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); \ template Array<T> ifft<T, 3>(const Array<T> &in, double normalize, dim_type const npad, dim_type const * const pad); INSTANTIATE2(cfloat ) INSTANTIATE2(cdouble) }
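find_cufft_plan above identifies a cached cuFFT plan purely by a sprintf-built key string over rank, sizes, embeds, strides, type and batch. The host-side sketch below restates that key construction with std::ostringstream so the cache key's contents are easier to read; the function name is hypothetical and the circular slot-reuse logic is intentionally left out.

#include <sstream>
#include <string>

// Collapse the parameters that make a cuFFT plan unique into one lookup key,
// e.g. "2:256:256:<type>:<batch>"; mirrors the sprintf logic in find_cufft_plan.
std::string make_plan_key(int rank, const int* n,
                          const int* inembed, int istride, int idist,
                          const int* onembed, int ostride, int odist,
                          int type, int batch)
{
    std::ostringstream key;
    key << rank << ':';
    for (int r = 0; r < rank; ++r)
        key << n[r] << ':';
    if (inembed != NULL) {
        for (int r = 0; r < rank; ++r)
            key << inembed[r] << ':';
        key << istride << ':' << idist << ':';
    }
    if (onembed != NULL) {
        for (int r = 0; r < rank; ++r)
            key << onembed[r] << ':';
        key << ostride << ':' << odist << ':';
    }
    key << type << ':' << batch;
    return key.str();
}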
e897da440cd4621cdb1e2c91433770977d9ece73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // // Modified by DDK // ------------------------------------------------------------------ #include <cfloat> #include "caffe/fast_rcnn_action_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ScenePoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // caffe_copy( rois_blob_.count(), rois_blob_.cpu_data(), rois_blob_.mutable_gpu_data() ); // const Dtype* bottom_rois = rois_blob_.gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); // int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; 
++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ScenePoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } // caffe_copy( rois_blob_.count(), rois_blob_.cpu_data(), rois_blob_.mutable_gpu_data() ); // const Dtype* bottom_rois = rois_blob_.gpu_data(); const int* argmax_data = max_idx_.gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); // const int count = bottom[0]->count(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ScenePoolingLayer); } // namespace caffe
e897da440cd4621cdb1e2c91433770977d9ece73.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // // Modified by DDK // ------------------------------------------------------------------ #include <cfloat> #include "caffe/fast_rcnn_action_layers.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename Dtype> void ScenePoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // caffe_copy( rois_blob_.count(), rois_blob_.cpu_data(), rois_blob_.mutable_gpu_data() ); // const Dtype* bottom_rois = rois_blob_.gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); // int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_data = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < 
pwend; ++pw) { if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { gradient += offset_top_diff[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ScenePoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } // caffe_copy( rois_blob_.count(), rois_blob_.cpu_data(), rois_blob_.mutable_gpu_data() ); // const Dtype* bottom_rois = rois_blob_.gpu_data(); const int* argmax_data = max_idx_.gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); // const int count = bottom[0]->count(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ScenePoolingLayer); } // namespace caffe
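In ROIPoolForward above, each pooled cell (ph, pw) maps to an input window through fractional bin sizes, floor/ceil rounding, an ROI offset and a clip to the feature map. The small host function below walks exactly that arithmetic for one cell so the index math can be checked in isolation; it is an illustration with made-up names, not part of the layer.

#include <algorithm>
#include <cmath>
#include <cstdio>

// Reproduce ROIPoolForward's window computation for a single pooled cell (ph, pw):
// fractional bin sizes, floor/ceil rounding, ROI offset, then clipping to the map.
void roi_bin_bounds(int roi_start_h, int roi_start_w, int roi_height, int roi_width,
                    int pooled_height, int pooled_width, int height, int width,
                    int ph, int pw)
{
    float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height);
    float bin_size_w = static_cast<float>(roi_width)  / static_cast<float>(pooled_width);
    int hstart = static_cast<int>(std::floor(ph * bin_size_h));
    int wstart = static_cast<int>(std::floor(pw * bin_size_w));
    int hend   = static_cast<int>(std::ceil((ph + 1) * bin_size_h));
    int wend   = static_cast<int>(std::ceil((pw + 1) * bin_size_w));
    hstart = std::min(std::max(hstart + roi_start_h, 0), height);
    hend   = std::min(std::max(hend   + roi_start_h, 0), height);
    wstart = std::min(std::max(wstart + roi_start_w, 0), width);
    wend   = std::min(std::max(wend   + roi_start_w, 0), width);
    std::printf("cell (%d,%d) pools rows [%d,%d) and cols [%d,%d)\n",
                ph, pw, hstart, hend, wstart, wend);
}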
68dacdfedce7e1954191edaf2504815efd3c5319.hip
// !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* cuda_malloc.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jwalsh <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/04/22 12:51:28 by tgros #+# #+# */
/* Updated: 2017/06/08 15:51:14 by jwalsh ### ########.fr */
/* */
/* ************************************************************************** */

#include "../../inc/rt.cuh"
#include "../inc/cuda_call.cuh"

static void reset_update_struct(t_raytracing_tools *r);
bool cuda_malloc_scene(t_raytracing_tools *r);

/*
** Allocates memory on the device and in pinned memory for the various structures
** in the scene.
*/

bool cuda_malloc(t_raytracing_tools *r)
{
    t_scene h_scene_to_array;

    memcpy(&h_scene_to_array, r->scene, sizeof(t_scene) - (sizeof(void *) * 3));
    memcpy(r->h_d_scene, r->scene, sizeof(t_scene) - (sizeof(void *) * 3));
    if (cuda_malloc_objects(r, &h_scene_to_array) == false)
        return(false);
    if (cuda_malloc_lights(r, &h_scene_to_array) == false)
        return(false);
    if (cuda_malloc_camera(r) == false)
        return(false);
    if (cuda_malloc_scene(r) == false)
        return(false);
    gpu_errchk(hipMemcpy(r->d_scene, r->h_d_scene, sizeof(t_scene), hipMemcpyHostToDevice));
    reset_update_struct(r);
    return (true);
}

static void reset_update_struct(t_raytracing_tools *r)
{
    r->update.resolution = 0;
    r->update.objects = 0;
    r->update.lights = 0;
    r->update.cameras = 0;
    r->update.scene = 0;
    r->update.ray_depth = 0;
    r->update.render = 0;
    r->update.photon_map = 0;
}

bool cuda_malloc_scene(t_raytracing_tools *r)
{
    if (r->update.resolution == 2)
    {
        gpu_errchk((hipHostMalloc(&r->d_pixel_map, sizeof(t_color) * r->scene->res.y * r->scene->res.x)));
        if (r->scene->is_3d)
            gpu_errchk((hipHostMalloc(&r->d_pixel_map_3d, sizeof(t_color) * r->scene->res.y * r->scene->res.x)));
    }
    if (r->update.ray_depth == 2)
    {
        gpu_errchk(hipSetDevice(0));
        hipDeviceSetLimit(hipLimitStackSize, 1024 * MAX_RAY_DEPTH);
    }
    if (r->update.anaglyph == 2)
        gpu_errchk((hipHostMalloc(&r->d_pixel_map_3d, sizeof(t_color) * r->scene->res.y * r->scene->res.x)));
    if (r->update.scene == 2)
    {
        if(test_cuda_malloc((void **)(&r->d_scene), sizeof(t_scene)) == false)
            return(false);
    }
    return(true);
}
68dacdfedce7e1954191edaf2504815efd3c5319.cu
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* cuda_malloc.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jwalsh <[email protected]> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/04/22 12:51:28 by tgros #+# #+# */
/* Updated: 2017/06/08 15:51:14 by jwalsh ### ########.fr */
/* */
/* ************************************************************************** */

#include "../../inc/rt.cuh"
#include "../inc/cuda_call.cuh"

static void reset_update_struct(t_raytracing_tools *r);
bool cuda_malloc_scene(t_raytracing_tools *r);

/*
** Allocates memory on the device and in pinned memory for the various structures
** in the scene.
*/

bool cuda_malloc(t_raytracing_tools *r)
{
    t_scene h_scene_to_array;

    memcpy(&h_scene_to_array, r->scene, sizeof(t_scene) - (sizeof(void *) * 3));
    memcpy(r->h_d_scene, r->scene, sizeof(t_scene) - (sizeof(void *) * 3));
    if (cuda_malloc_objects(r, &h_scene_to_array) == false)
        return(false);
    if (cuda_malloc_lights(r, &h_scene_to_array) == false)
        return(false);
    if (cuda_malloc_camera(r) == false)
        return(false);
    if (cuda_malloc_scene(r) == false)
        return(false);
    gpu_errchk(cudaMemcpy(r->d_scene, r->h_d_scene, sizeof(t_scene), cudaMemcpyHostToDevice));
    reset_update_struct(r);
    return (true);
}

static void reset_update_struct(t_raytracing_tools *r)
{
    r->update.resolution = 0;
    r->update.objects = 0;
    r->update.lights = 0;
    r->update.cameras = 0;
    r->update.scene = 0;
    r->update.ray_depth = 0;
    r->update.render = 0;
    r->update.photon_map = 0;
}

bool cuda_malloc_scene(t_raytracing_tools *r)
{
    if (r->update.resolution == 2)
    {
        gpu_errchk((cudaMallocHost(&r->d_pixel_map, sizeof(t_color) * r->scene->res.y * r->scene->res.x)));
        if (r->scene->is_3d)
            gpu_errchk((cudaMallocHost(&r->d_pixel_map_3d, sizeof(t_color) * r->scene->res.y * r->scene->res.x)));
    }
    if (r->update.ray_depth == 2)
    {
        gpu_errchk(cudaSetDevice(0));
        cudaDeviceSetLimit(cudaLimitStackSize, 1024 * MAX_RAY_DEPTH);
    }
    if (r->update.anaglyph == 2)
        gpu_errchk((cudaMallocHost(&r->d_pixel_map_3d, sizeof(t_color) * r->scene->res.y * r->scene->res.x)));
    if (r->update.scene == 2)
    {
        if(test_cuda_malloc((void **)(&r->d_scene), sizeof(t_scene)) == false)
            return(false);
    }
    return(true);
}
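cuda_malloc_scene above places the pixel maps in pinned (page-locked) host memory via hipHostMalloc / cudaMallocHost; pinned buffers are what allows later asynchronous copies on a stream to overlap with kernel work. A minimal self-contained sketch of that allocation-and-async-copy pattern, with made-up sizes and names, is:

#include <cuda_runtime.h>

int main()
{
    const size_t bytes = 1 << 20;                       // made-up buffer size
    float *h_pinned = NULL, *d_buf = NULL;
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaMallocHost((void**)&h_pinned, bytes);           // page-locked host memory (cf. hipHostMalloc)
    cudaMalloc((void**)&d_buf, bytes);
    // The copy below can overlap with other stream work only because h_pinned is pinned;
    // with a plain malloc'd buffer the runtime falls back to a staged, effectively synchronous path.
    cudaMemcpyAsync(d_buf, h_pinned, bytes, cudaMemcpyHostToDevice, stream);
    cudaStreamSynchronize(stream);
    cudaFree(d_buf);
    cudaFreeHost(h_pinned);
    cudaStreamDestroy(stream);
    return 0;
}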
3c95cbec5792e56d6917e1dbe4516dc205f9d452.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Asher Elmquist // ============================================================================= // // RT kernels for coloring upon ray not intersecting anything // // ============================================================================= */ #include "chrono_sensor/optix/shaders/device_utils.h" extern "C" __global__ void __miss__shader() { const MissParameters* miss = (MissParameters*)optixGetSbtDataPointer(); // figure out type and determine miss parameter for this ray type RayType raytype = (RayType)optixGetPayload_2(); switch (raytype) { case CAMERA_RAY_TYPE: { const CameraMissParameters& camera_miss = miss->camera_miss; PerRayData_camera* prd = getCameraPRD(); if (camera_miss.mode == BackgroundMode::ENVIRONMENT_MAP && camera_miss.env_map) { // evironment map assumes z up float3 ray_dir = optixGetWorldRayDirection(); float theta = atan2f(ray_dir.x, ray_dir.y); float phi = asinf(ray_dir.z); float tex_x = theta / (2 * CUDART_PI_F); float tex_y = phi / CUDART_PI_F + 0.5; float4 tex = tex2D<float4>(camera_miss.env_map, tex_x, tex_y); // Gamma Correction prd->color = Pow(make_float3(tex.x, tex.y, tex.z), 2.2) * prd->contrib_to_pixel; } else if (camera_miss.mode == BackgroundMode::GRADIENT) { // gradient // gradient assumes z=up float3 ray_dir = optixGetWorldRayDirection(); float mix = max(0.f, ray_dir.z); prd->color = (mix * camera_miss.color_zenith + (1 - mix) * camera_miss.color_horizon) * prd->contrib_to_pixel; } else { // default to solid color prd->color = camera_miss.color_zenith * prd->contrib_to_pixel; } // apply fog model if (prd->use_fog && params.fog_scattering > 0.f) { float blend_alpha = expf(-params.fog_scattering * optixGetRayTmax()); prd->color = blend_alpha * prd->color + (1 - blend_alpha) * params.fog_color*prd->contrib_to_pixel; } break; } case LIDAR_RAY_TYPE: { // leave as default values break; } case RADAR_RAY_TYPE: { // leave as default values break; } } }
3c95cbec5792e56d6917e1dbe4516dc205f9d452.cu
/* ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2019 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Authors: Asher Elmquist // ============================================================================= // // RT kernels for coloring upon ray not intersecting anything // // ============================================================================= */ #include "chrono_sensor/optix/shaders/device_utils.h" extern "C" __global__ void __miss__shader() { const MissParameters* miss = (MissParameters*)optixGetSbtDataPointer(); // figure out type and determine miss parameter for this ray type RayType raytype = (RayType)optixGetPayload_2(); switch (raytype) { case CAMERA_RAY_TYPE: { const CameraMissParameters& camera_miss = miss->camera_miss; PerRayData_camera* prd = getCameraPRD(); if (camera_miss.mode == BackgroundMode::ENVIRONMENT_MAP && camera_miss.env_map) { // evironment map assumes z up float3 ray_dir = optixGetWorldRayDirection(); float theta = atan2f(ray_dir.x, ray_dir.y); float phi = asinf(ray_dir.z); float tex_x = theta / (2 * CUDART_PI_F); float tex_y = phi / CUDART_PI_F + 0.5; float4 tex = tex2D<float4>(camera_miss.env_map, tex_x, tex_y); // Gamma Correction prd->color = Pow(make_float3(tex.x, tex.y, tex.z), 2.2) * prd->contrib_to_pixel; } else if (camera_miss.mode == BackgroundMode::GRADIENT) { // gradient // gradient assumes z=up float3 ray_dir = optixGetWorldRayDirection(); float mix = max(0.f, ray_dir.z); prd->color = (mix * camera_miss.color_zenith + (1 - mix) * camera_miss.color_horizon) * prd->contrib_to_pixel; } else { // default to solid color prd->color = camera_miss.color_zenith * prd->contrib_to_pixel; } // apply fog model if (prd->use_fog && params.fog_scattering > 0.f) { float blend_alpha = expf(-params.fog_scattering * optixGetRayTmax()); prd->color = blend_alpha * prd->color + (1 - blend_alpha) * params.fog_color*prd->contrib_to_pixel; } break; } case LIDAR_RAY_TYPE: { // leave as default values break; } case RADAR_RAY_TYPE: { // leave as default values break; } } }
47dca957bf058694945e9ea7b8e7f1a11133c95c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> // error handling #include <helper_cuda.h> // SDK package #include <helper_functions.h> template <int block_size> __global__ void matrixMultiply(float *d_A, float *d_B, float *d_C, int wA, int wB){ unsigned int x_index = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y_index = blockDim.y * blockIdx.y + threadIdx.y; __shared__ float blockA[block_size][block_size]; __shared__ float blockB[block_size][block_size]; float Csub = 0; int tx = threadIdx.x, ty = threadIdx.y; // __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to proceed. for(int i = 0; i < wA / block_size; i++){ blockA[ty][tx] = d_A[y_index * wA + (i * block_size + tx)]; blockB[ty][tx] = d_B[(i * block_size + ty) * wB + x_index]; __syncthreads(); for(int j = 0; j < block_size; j++){ Csub += (blockA[ty][j] * blockB[j][tx]); } __syncthreads(); } d_C[y_index * wA + x_index] = Csub; //blockC[y_index % blockDim.y][x_index % blockDim.x]; } typedef struct{ // matrixes' content float *h_pA; // partial matrix A float *h_B; // entire matrix B float *h_pC; // partial matrix C float *d_pA; float *d_B; float *d_pC; // stream for async execution hipStream_t stream; } TGPUplan; void init_matrix(float* matrix, int size, float val){ for(int i = 0; i < size; i++) matrix[i] = i ;//val; } int main(int argc, char* argv[]){ if(argc < 5){ printf("Usage: ./matrixMulMultiGPUTiling <GPU_N> <A_height> <A_width> <B_height> <B_width>\n"); return 0; } const int dimA_y = atoi(argv[2]), dimA_x = atoi(argv[3]), dimB_y = atoi(argv[4]), dimB_x = atoi(argv[5]); const int MAX_GPU_COUNT = 32, block_size = 16; // const int dimA_y = 8, dimA_x = 8, dimB_y = 8, dimB_x = 8; // const int MAX_GPU_COUNT = 32, block_size = 2; TGPUplan plan[MAX_GPU_COUNT]; int GPU_N, Sys_GPU_N; GPU_N = atoi(argv[1]); checkCudaErrors(hipGetDeviceCount(&Sys_GPU_N)); if(GPU_N > Sys_GPU_N){ printf("GPU count should be less than %d\n", Sys_GPU_N); } printf("GPU count: %d\n", GPU_N); dim3 dimsA(dimA_x, dimA_y); dim3 dimsB(dimB_x, dimB_y); dim3 dimsC(dimB_x, dimA_y); unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A; checkCudaErrors(hipHostMalloc((void**)(&h_A), mem_size_A)); init_matrix(h_A, size_A, 1.0); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B; checkCudaErrors(hipHostMalloc((void**)(&h_B), mem_size_B)); init_matrix(h_B, size_B, 2.0); unsigned int size_C = dimsC.x * dimsC.y; unsigned int mem_size_C = sizeof(float) * size_C; float *h_C; checkCudaErrors(hipHostMalloc((void**)(&h_C), mem_size_C)); init_matrix(h_C, size_C, 0.0); for(int i = 0; i < GPU_N; i++){ checkCudaErrors(hipSetDevice(i)); checkCudaErrors(hipStreamCreate(&plan[i].stream)); plan[i].h_B = h_B; plan[i].h_pA = h_A + i * (size_A / GPU_N); plan[i].h_pC = h_C + i * (size_C / GPU_N); checkCudaErrors(hipMalloc((void **)(&plan[i].d_B), mem_size_B)); checkCudaErrors(hipMalloc((void **)(&plan[i].d_pA), mem_size_A / GPU_N)); checkCudaErrors(hipMalloc((void **)(&plan[i].d_pC), mem_size_C / GPU_N)); } printf("Allocated memory space on each GPU\n"); // cal_time = HtoD + kernel + DtoH int niter = 300; dim3 threads(block_size, block_size); dim3 grid(dimsC.x / block_size, dimsC.y / (block_size * GPU_N)); for(int i = 0; i < GPU_N; i++){ checkCudaErrors(hipSetDevice(i)); // transfer data from host to device checkCudaErrors(hipMemcpyAsync(plan[i].d_B, plan[i].h_B, 
mem_size_B, hipMemcpyHostToDevice, plan[i].stream)); checkCudaErrors(hipMemcpyAsync(plan[i].d_pA, plan[i].h_pA, mem_size_A / GPU_N, hipMemcpyHostToDevice, plan[i].stream)); checkCudaErrors(hipMemcpyAsync(plan[i].d_pC, plan[i].h_pC, mem_size_C / GPU_N, hipMemcpyHostToDevice, plan[i].stream)); } hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start)); for(int j = 0; j < niter; j++){ for(int i = 0; i < GPU_N; i++){ // switch to gpu[i] // printf("Now at GPU[%d]\n", i); checkCudaErrors(hipSetDevice(i)); // printf("GPU[%d]: Transfer data from host to device\n", i); hipLaunchKernelGGL(( matrixMultiply<block_size>) , dim3(grid), dim3(threads), 0, plan[i].stream , plan[i].d_pA, plan[i].d_B, plan[i].d_pC, dimsA.x, dimsB.x); // printf("GPU[%d]: Call kernel\n", i); } // sync devices // wait each GPU complete its task for(int i = 0; i < GPU_N; i++){ checkCudaErrors(hipSetDevice(i)); hipStreamSynchronize(plan[i].stream); } } checkCudaErrors(hipEventRecord(stop)); // Wait for the stop event to complete checkCudaErrors(hipEventSynchronize(stop)); for(int i = 0; i < GPU_N; i++){ checkCudaErrors(hipSetDevice(i)); checkCudaErrors(hipMemcpyAsync(plan[i].h_pC, plan[i].d_pC, mem_size_C / GPU_N, hipMemcpyDeviceToHost, plan[i].stream)); // printf("GPU[%d]: Transfer data from device to host\n", i); } float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); // Compute and print the performance float msecPerMatrixMul = msecTotal / niter; double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) * static_cast<double>(dimsA.y) * static_cast<double>(dimsB.x); double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \ " WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // for(int i = 0; i < size_C; i++){ // printf("%.3f ", h_C[i]); // if((i + 1) % dimsC.x == 0) // printf("\n"); // } }
47dca957bf058694945e9ea7b8e7f1a11133c95c.cu
#include <stdio.h> #include <assert.h> #include <cuda_runtime.h> // error handling #include <helper_cuda.h> // SDK package #include <helper_functions.h> template <int block_size> __global__ void matrixMultiply(float *d_A, float *d_B, float *d_C, int wA, int wB){ unsigned int x_index = blockDim.x * blockIdx.x + threadIdx.x; unsigned int y_index = blockDim.y * blockIdx.y + threadIdx.y; __shared__ float blockA[block_size][block_size]; __shared__ float blockB[block_size][block_size]; float Csub = 0; int tx = threadIdx.x, ty = threadIdx.y; // __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to proceed. for(int i = 0; i < wA / block_size; i++){ blockA[ty][tx] = d_A[y_index * wA + (i * block_size + tx)]; blockB[ty][tx] = d_B[(i * block_size + ty) * wB + x_index]; __syncthreads(); for(int j = 0; j < block_size; j++){ Csub += (blockA[ty][j] * blockB[j][tx]); } __syncthreads(); } d_C[y_index * wA + x_index] = Csub; //blockC[y_index % blockDim.y][x_index % blockDim.x]; } typedef struct{ // matrixes' content float *h_pA; // partial matrix A float *h_B; // entire matrix B float *h_pC; // partial matrix C float *d_pA; float *d_B; float *d_pC; // stream for async execution cudaStream_t stream; } TGPUplan; void init_matrix(float* matrix, int size, float val){ for(int i = 0; i < size; i++) matrix[i] = i ;//val; } int main(int argc, char* argv[]){ if(argc < 5){ printf("Usage: ./matrixMulMultiGPUTiling <GPU_N> <A_height> <A_width> <B_height> <B_width>\n"); return 0; } const int dimA_y = atoi(argv[2]), dimA_x = atoi(argv[3]), dimB_y = atoi(argv[4]), dimB_x = atoi(argv[5]); const int MAX_GPU_COUNT = 32, block_size = 16; // const int dimA_y = 8, dimA_x = 8, dimB_y = 8, dimB_x = 8; // const int MAX_GPU_COUNT = 32, block_size = 2; TGPUplan plan[MAX_GPU_COUNT]; int GPU_N, Sys_GPU_N; GPU_N = atoi(argv[1]); checkCudaErrors(cudaGetDeviceCount(&Sys_GPU_N)); if(GPU_N > Sys_GPU_N){ printf("GPU count should be less than %d\n", Sys_GPU_N); } printf("GPU count: %d\n", GPU_N); dim3 dimsA(dimA_x, dimA_y); dim3 dimsB(dimB_x, dimB_y); dim3 dimsC(dimB_x, dimA_y); unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A; checkCudaErrors(cudaMallocHost((void**)(&h_A), mem_size_A)); init_matrix(h_A, size_A, 1.0); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B; checkCudaErrors(cudaMallocHost((void**)(&h_B), mem_size_B)); init_matrix(h_B, size_B, 2.0); unsigned int size_C = dimsC.x * dimsC.y; unsigned int mem_size_C = sizeof(float) * size_C; float *h_C; checkCudaErrors(cudaMallocHost((void**)(&h_C), mem_size_C)); init_matrix(h_C, size_C, 0.0); for(int i = 0; i < GPU_N; i++){ checkCudaErrors(cudaSetDevice(i)); checkCudaErrors(cudaStreamCreate(&plan[i].stream)); plan[i].h_B = h_B; plan[i].h_pA = h_A + i * (size_A / GPU_N); plan[i].h_pC = h_C + i * (size_C / GPU_N); checkCudaErrors(cudaMalloc((void **)(&plan[i].d_B), mem_size_B)); checkCudaErrors(cudaMalloc((void **)(&plan[i].d_pA), mem_size_A / GPU_N)); checkCudaErrors(cudaMalloc((void **)(&plan[i].d_pC), mem_size_C / GPU_N)); } printf("Allocated memory space on each GPU\n"); // cal_time = HtoD + kernel + DtoH int niter = 300; dim3 threads(block_size, block_size); dim3 grid(dimsC.x / block_size, dimsC.y / (block_size * GPU_N)); for(int i = 0; i < GPU_N; i++){ checkCudaErrors(cudaSetDevice(i)); // transfer data from host to device checkCudaErrors(cudaMemcpyAsync(plan[i].d_B, plan[i].h_B, mem_size_B, cudaMemcpyHostToDevice, 
plan[i].stream)); checkCudaErrors(cudaMemcpyAsync(plan[i].d_pA, plan[i].h_pA, mem_size_A / GPU_N, cudaMemcpyHostToDevice, plan[i].stream)); checkCudaErrors(cudaMemcpyAsync(plan[i].d_pC, plan[i].h_pC, mem_size_C / GPU_N, cudaMemcpyHostToDevice, plan[i].stream)); } cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start)); for(int j = 0; j < niter; j++){ for(int i = 0; i < GPU_N; i++){ // switch to gpu[i] // printf("Now at GPU[%d]\n", i); checkCudaErrors(cudaSetDevice(i)); // printf("GPU[%d]: Transfer data from host to device\n", i); matrixMultiply<block_size> <<< grid, threads, 0, plan[i].stream >>>(plan[i].d_pA, plan[i].d_B, plan[i].d_pC, dimsA.x, dimsB.x); // printf("GPU[%d]: Call kernel\n", i); } // sync devices // wait each GPU complete its task for(int i = 0; i < GPU_N; i++){ checkCudaErrors(cudaSetDevice(i)); cudaStreamSynchronize(plan[i].stream); } } checkCudaErrors(cudaEventRecord(stop)); // Wait for the stop event to complete checkCudaErrors(cudaEventSynchronize(stop)); for(int i = 0; i < GPU_N; i++){ checkCudaErrors(cudaSetDevice(i)); checkCudaErrors(cudaMemcpyAsync(plan[i].h_pC, plan[i].d_pC, mem_size_C / GPU_N, cudaMemcpyDeviceToHost, plan[i].stream)); // printf("GPU[%d]: Transfer data from device to host\n", i); } float msecTotal = 0.0f; checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); // Compute and print the performance float msecPerMatrixMul = msecTotal / niter; double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) * static_cast<double>(dimsA.y) * static_cast<double>(dimsB.x); double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \ " WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // for(int i = 0; i < size_C; i++){ // printf("%.3f ", h_C[i]); // if((i + 1) % dimsC.x == 0) // printf("\n"); // } }
67ed7f6649c3e69e2cdfddae0fc8bab87062dcd0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Protobuf/TabularRLData.pb.h" #include "rlagent.cuh" #include <sstream> #include <fstream> namespace mancalaCuda { //cuda functions /* every cuda block has it's own simulation of the game it's playing through each sim is just an array npits*2 + 2 large of ints plus a flag indicating who's turn it is */ //not very fast, but allows a space effient representation of state __device__ int getBoardIndex(board_state & bs) { int index = 0; int remainingSeeds = nSeeds_total; for(int i = nPits_total - 2; i >= 0; i--) { remainingSeeds -= bs.pits[i]; int maxAvoidedIndex = 1; for(int j = 0; j < i; j++) { //apply the recursive sums - available states = (Sum)^j (n) => (n)(n+1)(n+2)etc/(i!) maxAvoidedIndex = (maxAvoidedIndex*(remainingSeeds + j))/(j+1); } index += maxAvoidedIndex; } return index; } __device__ bool take_turn(board_state & bs, int action, bool & turnval) { //parameters for the turn int start_index = turnval*(nPits_player +1) + action; int pool_index = (turnval + 1) *(nPits_player + 1) - 1; //take the turn int beads = bs.pits[start_index]; bs.pits[start_index] = 0; int index = start_index; for (int j =0; j < beads; j++) { index ++; if (index >= nPits_total) { index = 0; } bs.pits[index] ++; } if (index != pool_index) { turnval = !turnval; } //empty pot handling if (bs.pits[index] == 1 && index >= turnval*(nPits_player + 1) && index < pool_index) { int opp_index = (nPits_player * 2 - index) % nPits_total; bs.pits[pool_index] += bs.pits[opp_index] + bs.pits[index]; bs.pits[opp_index] = 0; bs.pits[index] = 0; } for(int j = 0; j < nPits_player; j++) { if (bs.player1pits[j] > 0 || bs.player2pits[j] > 0) { return false; } } return true; } __device__ int chooseAction(board_state & bs, bool player, const float* QMat) { int stateIndex = getBoardIndex(bs) * nPits_player; //explortation //start deterministic //choose based on Qmat float maxQ = -100000; int rval = 0; //for test just return first valid int playerInd = player ? nPits_player + 1 : 0; for(int i = 0; i < nPits_player; i++) { float qval = QMat[stateIndex + i]; if(bs.pits[playerInd + i] > 0 && qval > maxQ) { rval = i; maxQ = qval; } } return rval; } __global__ void playGame(int num_sims, int nturns, turn_record * results, const float * QMat) { int run_index = blockIdx.x * blockDim.x + threadIdx.x; int run_stride = blockDim.x * gridDim.x; for (int i = run_index; i < num_sims; i += run_stride) { bool player = false; bool newgame = true; board_state board; for(int t = 0; t < nturns; t++) { if(newgame) { for(int p = 0; p < nPits_player; p++) { board.player1pits[p] = nSeeds; board.player2pits[p] = nSeeds; } board.player1pool = 0; board.player2pool = 0; newgame = false; } results[nturns*i + t].state = board; results[nturns*i + t].player = player; //if a game finishes start a new one, we can finish the sim mid step int action = chooseAction(board, player, QMat); newgame = take_turn(board, action, player); results[nturns*i + t].action = action; if(newgame) { for(int p = 0; p < nPits_player; p++) { board.player1pool += board.player1pits[p]; board.player2pool += board.player2pits[p]; } results[nturns * i + t].reward = board.player1pool > board.player2pool ? 1 : (board.player1pool < board.player2pool ? 
-1 : -2); } else { results[nturns*i + t].reward = 0; } } } } __global__ void updateQVals(int num_sims, int nturns, const turn_record * results,float * QMat) { int run_index = blockIdx.x * blockDim.x + threadIdx.x; int run_stride = blockDim.x * gridDim.x; for (int i = run_index; i < num_sims; i += run_stride) { ///TODO } } //class functions void RLAgent::parseBoardState(board_state& state, std::ostream & stream) { stream << " |"; for(int i = 0; i < nPits_player; i++) { stream << state.player1pits[i] << "|"; } stream << std::endl; stream << " |" << state.player1pool << "| "; for(int i = 0; i < nPits_player; i++) { stream << " "; } stream << "|" << state.player2pool << "| "; stream << std::endl; stream << " |"; for(int i = 0; i < nPits_player; i++) { stream << state.player2pits[i] << "|"; } stream << std::endl; } RLAgent::RLAgent(int num_sims, int num_turns) { name = "rlagent"; this->num_sims = num_sims; this->num_turns = num_turns; num_records = num_sims*num_turns; record_size = num_records * sizeof(turn_record); num_states = nPits_player; for(int j = 0; j < nPits_total -1; j++) { //apply the recursive sums - available states = (Sum)^j (n) => (n)(n+1)(n+2)etc/(i!) num_states = (num_states*(nSeeds_total + 1 + j))/(j+1); } state_size = num_states * sizeof(float); h_turnRecord.resize(num_records); h_Qvals.resize(num_states); hipMalloc(&d_Qvals, state_size); hipMalloc(&d_turnRecord, record_size); hipMemcpy(d_Qvals, h_Qvals.data(), state_size, hipMemcpyHostToDevice); } RLAgent::~RLAgent() { hipFree(d_turnRecord); hipFree(d_Qvals); } std::string RLAgent::GetName() { return name; } void RLAgent::RunStep() { int threadsPerBlock = 32; int blocksPerGrid = (num_sims + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( playGame), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, num_sims, num_turns, d_turnRecord, d_Qvals ); } void RLAgent::TrainStep() { int threadsPerBlock = 32; int blocksPerGrid = (num_sims + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( updateQVals), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, num_sims, num_turns, d_turnRecord, d_Qvals ); } std::string RLAgent::PrintRun() { hipMemcpy(h_turnRecord.data(), d_turnRecord, record_size, hipMemcpyDeviceToHost); std::stringstream outStream; for(int i = 0; i < num_turns; i++) { outStream << "turn " << i << " action: " << h_turnRecord[i].action << " player: " << h_turnRecord[i].player << " reward: " << h_turnRecord[i].reward << std::endl; parseBoardState(h_turnRecord[i].state, outStream); outStream << std::endl; } return outStream.str(); } void RLAgent::SaveQMat(std::string fileLoc) { hipMemcpy(h_Qvals.data(), d_Qvals, state_size, hipMemcpyDeviceToHost); mancala::QAgent outputProt; outputProt.set_npits(nPits_player); outputProt.set_nseeds(nSeeds); outputProt.mutable_q()->Add(h_Qvals.begin(), h_Qvals.end()); std::cout << num_states << " " << h_Qvals.size() << " "<< outputProt.mutable_q()->size(); std::ofstream file(fileLoc); outputProt.SerializeToOstream(&file); file.close(); } void RLAgent::LoadQMat(std::string fileLoc) { std::ifstream file(fileLoc); mancala::QAgent inputProt; inputProt.ParseFromIstream(&file); file.close(); if(inputProt.q().size() == num_states) { h_Qvals.assign(inputProt.q().begin(), inputProt.q().end()); hipMemcpy(d_Qvals, h_Qvals.data(), state_size, hipMemcpyHostToDevice); } } template<unsigned int blockSize> __device__ void warpReduceMax(volatile int * sdata, int tid) { if (blockSize >= 64) sdata[tid] = sdata[tid] > sdata[tid + 32] ? 
sdata[tid] : sdata[tid + 32]; if (blockSize >= 32) sdata[tid] = sdata[tid] > sdata[tid + 16] ? sdata[tid] : sdata[tid + 16]; if (blockSize >= 16) sdata[tid] = sdata[tid] > sdata[tid + 8] ? sdata[tid] : sdata[tid + 8]; if (blockSize >= 8) sdata[tid] = sdata[tid] > sdata[tid + 4] ? sdata[tid] : sdata[tid + 4]; if (blockSize >= 4) sdata[tid] = sdata[tid] > sdata[tid + 2] ? sdata[tid] : sdata[tid + 2]; if (blockSize >= 2) sdata[tid] = sdata[tid] > sdata[tid + 1] ? sdata[tid] : sdata[tid + 1]; } template<unsigned int blockSize> __global__ void reduceMax(int datalen, int *g_idata, int *g_odata) { extern __shared__ int sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = INT_MIN; //serial reduction first until we come down to the grid size while(i < datalen) { int a = g_idata[i]; sdata[tid] = sdata[tid] > a ? sdata[tid] : a; i += gridSize; } __syncthreads(); // do reduction in shared mem if(blockSize >= 512) { if (tid < 256) { sdata[tid] = sdata[tid] > sdata[tid + 256] ? sdata[tid] : sdata[tid + 256];} __syncthreads(); } if(blockSize >= 256) { if (tid < 128) { sdata[tid] = sdata[tid] > sdata[tid + 128] ? sdata[tid] : sdata[tid + 128];} __syncthreads(); } if(blockSize >= 128) { if (tid < 64) { sdata[tid] = sdata[tid] > sdata[tid + 64] ? sdata[tid] : sdata[tid + 64];} __syncthreads(); } if(tid < 32) { warpReduceMax<blockSize>(sdata, tid); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } //for practicing reduce int RLAgent::GetMax(std::vector<int> values) { int dataSize = values.size()*sizeof(int); int* d_values; int* d_output; hipMalloc(&d_values, dataSize); hipMalloc(&d_output, dataSize); hipMemcpy(d_values, values.data(), dataSize, hipMemcpyHostToDevice); constexpr int blockSize = 128; constexpr int itemsPerThread = 128; constexpr int itemsPerBlock = blockSize*itemsPerThread; for(int i = values.size(); i > 1; i = i/ blockSize + 1) { int nBlock = (i + itemsPerBlock - 1)/(itemsPerBlock); hipLaunchKernelGGL(( reduceMax<blockSize>), dim3(nBlock), dim3(blockSize), blockSize*sizeof(int), 0, i, d_values, d_output); i = i/ blockSize + 1; if(i > 1) { nBlock = (i + itemsPerBlock - 1)/(itemsPerBlock); hipLaunchKernelGGL(( reduceMax<blockSize>), dim3(nBlock), dim3(blockSize), blockSize*sizeof(int), 0, i, d_output, d_values); } else { hipDeviceSynchronize(); hipMemcpy(d_values, d_output, sizeof(int), hipMemcpyDeviceToDevice); hipDeviceSynchronize(); } } int rval = -1; hipMemcpy( &rval, d_values,sizeof(int), hipMemcpyDeviceToHost); hipFree(d_values); hipFree(d_output); return rval; } }
67ed7f6649c3e69e2cdfddae0fc8bab87062dcd0.cu
#include "Protobuf/TabularRLData.pb.h" #include "rlagent.cuh" #include <sstream> #include <fstream> namespace mancalaCuda { //cuda functions /* every cuda block has it's own simulation of the game it's playing through each sim is just an array npits*2 + 2 large of ints plus a flag indicating who's turn it is */ //not very fast, but allows a space effient representation of state __device__ int getBoardIndex(board_state & bs) { int index = 0; int remainingSeeds = nSeeds_total; for(int i = nPits_total - 2; i >= 0; i--) { remainingSeeds -= bs.pits[i]; int maxAvoidedIndex = 1; for(int j = 0; j < i; j++) { //apply the recursive sums - available states = (Sum)^j (n) => (n)(n+1)(n+2)etc/(i!) maxAvoidedIndex = (maxAvoidedIndex*(remainingSeeds + j))/(j+1); } index += maxAvoidedIndex; } return index; } __device__ bool take_turn(board_state & bs, int action, bool & turnval) { //parameters for the turn int start_index = turnval*(nPits_player +1) + action; int pool_index = (turnval + 1) *(nPits_player + 1) - 1; //take the turn int beads = bs.pits[start_index]; bs.pits[start_index] = 0; int index = start_index; for (int j =0; j < beads; j++) { index ++; if (index >= nPits_total) { index = 0; } bs.pits[index] ++; } if (index != pool_index) { turnval = !turnval; } //empty pot handling if (bs.pits[index] == 1 && index >= turnval*(nPits_player + 1) && index < pool_index) { int opp_index = (nPits_player * 2 - index) % nPits_total; bs.pits[pool_index] += bs.pits[opp_index] + bs.pits[index]; bs.pits[opp_index] = 0; bs.pits[index] = 0; } for(int j = 0; j < nPits_player; j++) { if (bs.player1pits[j] > 0 || bs.player2pits[j] > 0) { return false; } } return true; } __device__ int chooseAction(board_state & bs, bool player, const float* QMat) { int stateIndex = getBoardIndex(bs) * nPits_player; //explortation //start deterministic //choose based on Qmat float maxQ = -100000; int rval = 0; //for test just return first valid int playerInd = player ? nPits_player + 1 : 0; for(int i = 0; i < nPits_player; i++) { float qval = QMat[stateIndex + i]; if(bs.pits[playerInd + i] > 0 && qval > maxQ) { rval = i; maxQ = qval; } } return rval; } __global__ void playGame(int num_sims, int nturns, turn_record * results, const float * QMat) { int run_index = blockIdx.x * blockDim.x + threadIdx.x; int run_stride = blockDim.x * gridDim.x; for (int i = run_index; i < num_sims; i += run_stride) { bool player = false; bool newgame = true; board_state board; for(int t = 0; t < nturns; t++) { if(newgame) { for(int p = 0; p < nPits_player; p++) { board.player1pits[p] = nSeeds; board.player2pits[p] = nSeeds; } board.player1pool = 0; board.player2pool = 0; newgame = false; } results[nturns*i + t].state = board; results[nturns*i + t].player = player; //if a game finishes start a new one, we can finish the sim mid step int action = chooseAction(board, player, QMat); newgame = take_turn(board, action, player); results[nturns*i + t].action = action; if(newgame) { for(int p = 0; p < nPits_player; p++) { board.player1pool += board.player1pits[p]; board.player2pool += board.player2pits[p]; } results[nturns * i + t].reward = board.player1pool > board.player2pool ? 1 : (board.player1pool < board.player2pool ? 
-1 : -2); } else { results[nturns*i + t].reward = 0; } } } } __global__ void updateQVals(int num_sims, int nturns, const turn_record * results,float * QMat) { int run_index = blockIdx.x * blockDim.x + threadIdx.x; int run_stride = blockDim.x * gridDim.x; for (int i = run_index; i < num_sims; i += run_stride) { ///TODO } } //class functions void RLAgent::parseBoardState(board_state& state, std::ostream & stream) { stream << " |"; for(int i = 0; i < nPits_player; i++) { stream << state.player1pits[i] << "|"; } stream << std::endl; stream << " |" << state.player1pool << "| "; for(int i = 0; i < nPits_player; i++) { stream << " "; } stream << "|" << state.player2pool << "| "; stream << std::endl; stream << " |"; for(int i = 0; i < nPits_player; i++) { stream << state.player2pits[i] << "|"; } stream << std::endl; } RLAgent::RLAgent(int num_sims, int num_turns) { name = "rlagent"; this->num_sims = num_sims; this->num_turns = num_turns; num_records = num_sims*num_turns; record_size = num_records * sizeof(turn_record); num_states = nPits_player; for(int j = 0; j < nPits_total -1; j++) { //apply the recursive sums - available states = (Sum)^j (n) => (n)(n+1)(n+2)etc/(i!) num_states = (num_states*(nSeeds_total + 1 + j))/(j+1); } state_size = num_states * sizeof(float); h_turnRecord.resize(num_records); h_Qvals.resize(num_states); cudaMalloc(&d_Qvals, state_size); cudaMalloc(&d_turnRecord, record_size); cudaMemcpy(d_Qvals, h_Qvals.data(), state_size, cudaMemcpyHostToDevice); } RLAgent::~RLAgent() { cudaFree(d_turnRecord); cudaFree(d_Qvals); } std::string RLAgent::GetName() { return name; } void RLAgent::RunStep() { int threadsPerBlock = 32; int blocksPerGrid = (num_sims + threadsPerBlock - 1) / threadsPerBlock; playGame<<<blocksPerGrid, threadsPerBlock>>>(num_sims, num_turns, d_turnRecord, d_Qvals ); } void RLAgent::TrainStep() { int threadsPerBlock = 32; int blocksPerGrid = (num_sims + threadsPerBlock - 1) / threadsPerBlock; updateQVals<<<blocksPerGrid, threadsPerBlock>>>(num_sims, num_turns, d_turnRecord, d_Qvals ); } std::string RLAgent::PrintRun() { cudaMemcpy(h_turnRecord.data(), d_turnRecord, record_size, cudaMemcpyDeviceToHost); std::stringstream outStream; for(int i = 0; i < num_turns; i++) { outStream << "turn " << i << " action: " << h_turnRecord[i].action << " player: " << h_turnRecord[i].player << " reward: " << h_turnRecord[i].reward << std::endl; parseBoardState(h_turnRecord[i].state, outStream); outStream << std::endl; } return outStream.str(); } void RLAgent::SaveQMat(std::string fileLoc) { cudaMemcpy(h_Qvals.data(), d_Qvals, state_size, cudaMemcpyDeviceToHost); mancala::QAgent outputProt; outputProt.set_npits(nPits_player); outputProt.set_nseeds(nSeeds); outputProt.mutable_q()->Add(h_Qvals.begin(), h_Qvals.end()); std::cout << num_states << " " << h_Qvals.size() << " "<< outputProt.mutable_q()->size(); std::ofstream file(fileLoc); outputProt.SerializeToOstream(&file); file.close(); } void RLAgent::LoadQMat(std::string fileLoc) { std::ifstream file(fileLoc); mancala::QAgent inputProt; inputProt.ParseFromIstream(&file); file.close(); if(inputProt.q().size() == num_states) { h_Qvals.assign(inputProt.q().begin(), inputProt.q().end()); cudaMemcpy(d_Qvals, h_Qvals.data(), state_size, cudaMemcpyHostToDevice); } } template<unsigned int blockSize> __device__ void warpReduceMax(volatile int * sdata, int tid) { if (blockSize >= 64) sdata[tid] = sdata[tid] > sdata[tid + 32] ? sdata[tid] : sdata[tid + 32]; if (blockSize >= 32) sdata[tid] = sdata[tid] > sdata[tid + 16] ? 
sdata[tid] : sdata[tid + 16]; if (blockSize >= 16) sdata[tid] = sdata[tid] > sdata[tid + 8] ? sdata[tid] : sdata[tid + 8]; if (blockSize >= 8) sdata[tid] = sdata[tid] > sdata[tid + 4] ? sdata[tid] : sdata[tid + 4]; if (blockSize >= 4) sdata[tid] = sdata[tid] > sdata[tid + 2] ? sdata[tid] : sdata[tid + 2]; if (blockSize >= 2) sdata[tid] = sdata[tid] > sdata[tid + 1] ? sdata[tid] : sdata[tid + 1]; } template<unsigned int blockSize> __global__ void reduceMax(int datalen, int *g_idata, int *g_odata) { extern __shared__ int sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = INT_MIN; //serial reduction first until we come down to the grid size while(i < datalen) { int a = g_idata[i]; sdata[tid] = sdata[tid] > a ? sdata[tid] : a; i += gridSize; } __syncthreads(); // do reduction in shared mem if(blockSize >= 512) { if (tid < 256) { sdata[tid] = sdata[tid] > sdata[tid + 256] ? sdata[tid] : sdata[tid + 256];} __syncthreads(); } if(blockSize >= 256) { if (tid < 128) { sdata[tid] = sdata[tid] > sdata[tid + 128] ? sdata[tid] : sdata[tid + 128];} __syncthreads(); } if(blockSize >= 128) { if (tid < 64) { sdata[tid] = sdata[tid] > sdata[tid + 64] ? sdata[tid] : sdata[tid + 64];} __syncthreads(); } if(tid < 32) { warpReduceMax<blockSize>(sdata, tid); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } //for practicing reduce int RLAgent::GetMax(std::vector<int> values) { int dataSize = values.size()*sizeof(int); int* d_values; int* d_output; cudaMalloc(&d_values, dataSize); cudaMalloc(&d_output, dataSize); cudaMemcpy(d_values, values.data(), dataSize, cudaMemcpyHostToDevice); constexpr int blockSize = 128; constexpr int itemsPerThread = 128; constexpr int itemsPerBlock = blockSize*itemsPerThread; for(int i = values.size(); i > 1; i = i/ blockSize + 1) { int nBlock = (i + itemsPerBlock - 1)/(itemsPerBlock); reduceMax<blockSize><<<nBlock, blockSize, blockSize*sizeof(int)>>>(i, d_values, d_output); i = i/ blockSize + 1; if(i > 1) { nBlock = (i + itemsPerBlock - 1)/(itemsPerBlock); reduceMax<blockSize><<<nBlock, blockSize, blockSize*sizeof(int)>>>(i, d_output, d_values); } else { cudaDeviceSynchronize(); cudaMemcpy(d_values, d_output, sizeof(int), cudaMemcpyDeviceToDevice); cudaDeviceSynchronize(); } } int rval = -1; cudaMemcpy( &rval, d_values,sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_values); cudaFree(d_output); return rval; } }
c0e1cf62b595926e134a91f4c41f35b9d98d4bf2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define w 8000 // __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] * B[i]; } } __global__ void DotMulVet(const float* A,const float* B,float* C,int N) { int index = blockIdx.x*blockDim.x+threadIdx.x; const int offset = gridDim.x * blockDim.x; while (index < N) { C[index] = A[index] * B[index]; index += offset; } } // extern "C" int func() // { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 512; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } printf("Index h_A h_B\n"); // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; //printf("Index %d: %f %f\n",i,h_A[i],h_B[i]); } printf("\n"); // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size);// if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);// if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( DotMulVet), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. 
printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < 5; ++i) { if (fabs(h_A[i] * h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n\n"); printf("vectorAdd_Result:\n"); for(int i=0;i<5;i++) printf("Index %d: %f\n",i,h_C[i]); printf("\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; } struct Matrix { int width; int height; float *elements; }; __device__ float getElement(Matrix *A, int row, int col) { return A->elements[row * A->width + col]; } __device__ void setElement(Matrix *A, int row, int col, float value) { A->elements[row * A->width + col] = value; } __global__ void matMulKernel(Matrix *A, Matrix *B, Matrix *C) { float Cvalue = 0.0; int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; for (int i = 0; i < A->width; ++i) { Cvalue += getElement(A, row, i) * getElement(B, i, col); } setElement(C, row, col, Cvalue); } extern "C" int testmatMul() { int width = w; int height = w; Matrix *A, *B, *C; hipMallocManaged((void**)&A, sizeof(Matrix)); hipMallocManaged((void**)&B, sizeof(Matrix)); hipMallocManaged((void**)&C, sizeof(Matrix)); int nBytes = width * height * sizeof(float); hipMallocManaged((void**)&A->elements, nBytes); hipMallocManaged((void**)&B->elements, nBytes); hipMallocManaged((void**)&C->elements, nBytes); A->height = height; A->width = width; B->height = height; B->width = width; C->height = height; C->width = width; for (int i = 0; i < width * height; ++i) { A->elements[i] = 1.0; B->elements[i] = 2.0; } dim3 blockSize(32, 32); dim3 gridSize((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y); /* struct timeval t1,t2; gettimeofday(&t1,NULL); double timeuse; */ matMulKernel << < gridSize, blockSize >> >(A, B, C); hipDeviceSynchronize(); /* gettimeofday(&t2,NULL); timeuse = t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/1000000.0; printf("Use Time:%fs\n", timeuse); */ }
c0e1cf62b595926e134a91f4c41f35b9d98d4bf2.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define w 8000

// device-side code
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        C[i] = A[i] * B[i];
    }
}

__global__ void DotMulVet(const float* A,const float* B,float* C,int N)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    const int offset = gridDim.x * blockDim.x;
    while (index < N)
    {
        C[index] = A[index] * B[index];
        index += offset;
    }
}

// host-side code
extern "C" int func() // note the definition form here
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size
    int numElements = 512;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);

    // Allocate the host input vector A
    float *h_A = (float *)malloc(size);

    // Allocate the host input vector B
    float *h_B = (float *)malloc(size);

    // Allocate the host output vector C
    float *h_C = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    printf("Index h_A h_B\n");
    // Initialize the host input vectors
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
        //printf("Index %d: %f %f\n",i,h_A[i],h_B[i]);
    }
    printf("\n");

    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);// allocate one-dimensional linear memory

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B in host memory to the device input vectors in
    // device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);// transfer the data in the one-dimensional linear memory from the host to the device

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the Vector Add CUDA Kernel
    int threadsPerBlock = 256;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    DotMulVet<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    err = cudaGetLastError();

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result vector in device memory to the host result vector
    // in host memory.
printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < 5; ++i) { if (fabs(h_A[i] * h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n\n"); printf("vectorAdd_Result:\n"); for(int i=0;i<5;i++) printf("Index %d: %f\n",i,h_C[i]); printf("\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; } struct Matrix { int width; int height; float *elements; }; __device__ float getElement(Matrix *A, int row, int col) { return A->elements[row * A->width + col]; } __device__ void setElement(Matrix *A, int row, int col, float value) { A->elements[row * A->width + col] = value; } __global__ void matMulKernel(Matrix *A, Matrix *B, Matrix *C) { float Cvalue = 0.0; int row = threadIdx.y + blockIdx.y * blockDim.y; int col = threadIdx.x + blockIdx.x * blockDim.x; for (int i = 0; i < A->width; ++i) { Cvalue += getElement(A, row, i) * getElement(B, i, col); } setElement(C, row, col, Cvalue); } extern "C" int testmatMul() { int width = w; int height = w; Matrix *A, *B, *C; cudaMallocManaged((void**)&A, sizeof(Matrix)); cudaMallocManaged((void**)&B, sizeof(Matrix)); cudaMallocManaged((void**)&C, sizeof(Matrix)); int nBytes = width * height * sizeof(float); cudaMallocManaged((void**)&A->elements, nBytes); cudaMallocManaged((void**)&B->elements, nBytes); cudaMallocManaged((void**)&C->elements, nBytes); A->height = height; A->width = width; B->height = height; B->width = width; C->height = height; C->width = width; for (int i = 0; i < width * height; ++i) { A->elements[i] = 1.0; B->elements[i] = 2.0; } dim3 blockSize(32, 32); dim3 gridSize((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y); /* struct timeval t1,t2; gettimeofday(&t1,NULL); double timeuse; */ matMulKernel << < gridSize, blockSize >> >(A, B, C); cudaDeviceSynchronize(); /* gettimeofday(&t2,NULL); timeuse = t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/1000000.0; printf("Use Time:%fs\n", timeuse); */ }
c7a92079e946b18fa17430274f435890ec699d0b.hip
// !!! This is a file automatically generated by hipify!!! ////////////////////////////////////////////////////////////////////////// ////This is the code implementation for GPU Premier League Round 2: n-body simulation ////////////////////////////////////////////////////////////////////////// #include <iostream> #include <fstream> #include <vector> #include <chrono> #include <hip/hip_runtime.h> using namespace std; ////////////////////////////////////////////////////////////////////////// ////TODO 0: Please replace the following strings with your team name and author names ////Note: Please do not use space in the string, use "_" instead ////////////////////////////////////////////////////////////////////////// namespace name { std::string team="Slim_Shaders"; std::string author_1="Andrw_Yang"; std::string author_2="Matthew_Kenney"; }; ////////////////////////////////////////////////////////////////////////// ////TODO 1: your GPU variables and functions start here // Compute Acceleration from Force interaction between two bodies __device__ double3 findAccel(const double4 ipos, const double4 jpos, //// Body comparing to const double epsilon_squared, double3 ai) { // ipos -> position (and mass) of body i // jpos -> position (and mass) of body j // epsilon_squared -> softening factor // ai -> acceleration of body i to update // Compute the Denominator of the Acceleration Update double rx = jpos.x - ipos.x; double ry = jpos.y - ipos.y; double rz = jpos.z - ipos.z; double r2 = rx * rx + ry * ry + rz * rz + epsilon_squared; double r_6 = r2 * r2 * r2; double directionless_ai = jpos.w * rsqrt(r_6); // Compute the change in acceleration: ai.x += rx * directionless_ai; ai.y += ry * directionless_ai; ai.z += rz * directionless_ai; return ai; } // Computes Velocity given Acceleration for a single body // Given time step, velocity, and acceleration for that body __device__ double3 findV(double3 vel, double3 acc, const double dt) { // update velocity vel.x += acc.x * dt; vel.y += acc.y * dt; vel.z += acc.z * dt; return vel; } // Computes the acceleration changes to all bodies for a given time step. // Implements a tiling approach in order to achieve shared memory speedups. 
__global__ void tileForceBodies(double4* pos, double3 *vel, double3 *acc, const double epsilon_squared, const double dt, const int particle_n) { int global_tid = blockIdx.x * blockDim.x + threadIdx.x; int local_tid = threadIdx.x; if (global_tid < particle_n) { // This thread's information: double4 this_pos = pos[global_tid]; // w (mass), x, y, z double3 this_vel = vel[global_tid]; // current body's velocity double3 this_acc; // current acceleration (set via N-body computation) this_acc.x = 0.0; this_acc.y = 0.0; this_acc.z = 0.0; // 1 x blockDim shared memory extern __shared__ double4 bodyData[]; // Load shared memory #pragma unroll 4 for(int i = 0; i < particle_n; i+=blockDim.x) { // divides particles into N/blockDim chunks // load position values for blockDim particles into shared memory: bodyData[local_tid] = pos[i + local_tid]; // move blockDim slots ahead on each outer loop execution __syncthreads(); // Calculate interactions between current body & all bodies j in the domain j [i, i + blockDim) #pragma unroll 32 for(int j = 0; j < blockDim.x; j++) { double4 jpos = bodyData[j]; this_acc = findAccel(this_pos, jpos, epsilon_squared, this_acc); } __syncthreads(); } // Find velocity this_vel = findV(this_vel, this_acc, dt); // write back to global memory: acc[global_tid] = this_acc; vel[global_tid] = this_vel; } } // Kernel Function to update the positions of all bodies once acceleration update has finished __global__ void updatePositions(double4* pos, double3* vel, const double dt){ int global_tid = blockIdx.x * blockDim.x + threadIdx.x; pos[global_tid].x += vel[global_tid].x * dt; pos[global_tid].y += vel[global_tid].y * dt; pos[global_tid].z += vel[global_tid].z * dt; } ////Your implementations end here ////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////// ////Test function for n-body simulator ofstream out; ////////////////////////////////////////////////////////////////////////// ////Please do not change the values below const double dt=0.001; ////time step const int time_step_num=10; ////number of time steps const double epsilon=1e-2; ////epsilon added in the denominator to avoid 0-division when calculating the gravitational force const double epsilon_squared=epsilon*epsilon; ////epsilon squared ////We use grid_size=4 to help you debug your code, change it to a bigger number (e.g., 16, 32, etc.) 
to test the performance of your GPU code const unsigned int grid_size=4; ////assuming particles are initialized on a background grid const unsigned int particle_n=pow(grid_size,3); ////assuming each grid cell has one particle at the beginning // Thread Count is min of particle_n and 32 (so as not to spawn excess threads in the case of a small number of bodies) const unsigned int thread_count = min(particle_n, 32); //const unsigned int thread_count = 128; __host__ void Test_N_Body_Simulation() { ////initialize position, velocity, acceleration, and mass printf("Using %d threads per block\n", thread_count); printf("Using %d blocks\n\n", (int)ceil(double(particle_n)/double(thread_count))); double* pos_x=new double[particle_n]; double* pos_y=new double[particle_n]; double* pos_z=new double[particle_n]; ////initialize particle positions as the cell centers on a background grid double dx=1.0/(double)grid_size; for(unsigned int k=0;k<grid_size;k++){ for(unsigned int j=0;j<grid_size;j++){ for(unsigned int i=0;i<grid_size;i++){ unsigned int index=k*grid_size*grid_size+j*grid_size+i; pos_x[index]=dx*(double)i; pos_y[index]=dx*(double)j; pos_z[index]=dx*(double)k; } } } double* vel_x=new double[particle_n]; memset(vel_x,0x00,particle_n*sizeof(double)); double* vel_y=new double[particle_n]; memset(vel_y,0x00,particle_n*sizeof(double)); double* vel_z=new double[particle_n]; memset(vel_z,0x00,particle_n*sizeof(double)); double* acl_x=new double[particle_n]; memset(acl_x,0x00,particle_n*sizeof(double)); double* acl_y=new double[particle_n]; memset(acl_y,0x00,particle_n*sizeof(double)); double* acl_z=new double[particle_n]; memset(acl_z,0x00,particle_n*sizeof(double)); double* mass=new double[particle_n]; for(int i=0;i<particle_n;i++) { mass[i] = 100.0; } ////////////////////////////////////////////////////////////////////////// // Creating double values like CPU and moving to GPU double4* pos_host = new double4[particle_n]; double3* vel_host= new double3[particle_n]; double3* acl_host = new double3[particle_n]; // Set position and mass data in pos_gpu for(unsigned int k=0;k<grid_size;k++){ for(unsigned int j=0;j<grid_size;j++){ for(unsigned int i=0;i<grid_size;i++){ unsigned int index=k*grid_size*grid_size+j*grid_size+i; pos_host[index].x = dx*(double)i; pos_host[index].y = dx*(double)j; pos_host[index].z = dx*(double)k; } } } for(int i=0;i<particle_n;i++) { pos_host[i].w=100.0; } // set velocity and acceleration vectors to 0 for(int i=0; i<particle_n; i++){ vel_host[i].x = 0; vel_host[i].y = 0; vel_host[i].z = 0; } // Copy vectors over to GPU double4* pos_gpu; double3* vel_gpu; double3* acl_gpu; hipMalloc((void**)&pos_gpu, particle_n * sizeof(double4)); hipMalloc((void**)&vel_gpu, particle_n * sizeof(double3)); hipMalloc((void**)&acl_gpu, particle_n * sizeof(double3)); hipMemcpy(pos_gpu, pos_host, particle_n*sizeof(double4), hipMemcpyHostToDevice); hipMemcpy(vel_gpu, vel_host, particle_n*sizeof(double3), hipMemcpyHostToDevice); ////////////////////////////////////////////////////////////////////////// ////Your implementation: n-body simulator on GPU hipEvent_t start,end; hipEventCreate(&start); hipEventCreate(&end); float gpu_time=0.0f; hipDeviceSynchronize(); hipEventRecord(start); ////////////////////////////////////////////////////////////////////////// ////TODO 2: Your GPU functions are called here ////Requirement: You need to copy data from the CPU arrays, conduct computations on the GPU, and copy the values back from GPU to CPU ////The final positions should be stored in the same place as the CPU 
n-body function, i.e., pos_x, pos_y, pos_z ////The correctness of your simulation will be evaluated by comparing the results (positions) with the results calculated by the default CPU implementations ////////////////////////////////////////////////////////////////////////// int num_blocks = ceil((double)particle_n/(double)thread_count); cout<<"\nTotal number of particles: "<<particle_n<<endl; cout<<"Tracking the motion of particle "<<particle_n/2<<endl; cout<<"Print statements disabled "<<endl; // Step through time for(int i=0;i<time_step_num;i++){ // Here, we synchronize global memory before updating positions to avoid // Read after write conflicts for large values of N: hipLaunchKernelGGL(( tileForceBodies), dim3(num_blocks), dim3(thread_count), thread_count * sizeof(double4), 0, pos_gpu, vel_gpu, acl_gpu, epsilon_squared, dt, particle_n); hipDeviceSynchronize(); hipLaunchKernelGGL(( updatePositions), dim3(num_blocks), dim3(thread_count), 0, 0, pos_gpu, vel_gpu, dt); hipDeviceSynchronize(); // hipMemcpy(pos_host, pos_gpu, particle_n*sizeof(double4), hipMemcpyDeviceToHost); // cout<<"pos on timestep "<<i<<": "<<pos_host[particle_n/2].x<<", "<<pos_host[particle_n/2].y<<", "<<pos_host[particle_n/2].z<<endl; } hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&gpu_time,start,end); printf("\nGPU runtime: %.4f ms\n",gpu_time); hipEventDestroy(start); hipEventDestroy(end); ////////////////////////////////////////////////////////////////////////// hipMemcpy(pos_host, pos_gpu, particle_n*sizeof(double4), hipMemcpyDeviceToHost); out<<"R0: "<<pos_host[particle_n/2].x<<" " <<pos_host[particle_n/2].y<<" " <<pos_host[particle_n/2].z<<endl; out<<"T1: "<<gpu_time<<endl; } int main() { if(name::team=="Team_X"){ printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n"); return 0; } std::string file_name=name::team+"_competition_2_nbody.dat"; out.open(file_name.c_str()); if(out.fail()){ printf("\ncannot open file %s to record results\n",file_name.c_str()); return 0; } Test_N_Body_Simulation(); return 0; }
c7a92079e946b18fa17430274f435890ec699d0b.cu
////////////////////////////////////////////////////////////////////////// ////This is the code implementation for GPU Premier League Round 2: n-body simulation ////////////////////////////////////////////////////////////////////////// #include <iostream> #include <fstream> #include <vector> #include <chrono> #include <cuda_runtime.h> using namespace std; ////////////////////////////////////////////////////////////////////////// ////TODO 0: Please replace the following strings with your team name and author names ////Note: Please do not use space in the string, use "_" instead ////////////////////////////////////////////////////////////////////////// namespace name { std::string team="Slim_Shaders"; std::string author_1="Andrw_Yang"; std::string author_2="Matthew_Kenney"; }; ////////////////////////////////////////////////////////////////////////// ////TODO 1: your GPU variables and functions start here // Compute Acceleration from Force interaction between two bodies __device__ double3 findAccel(const double4 ipos, const double4 jpos, //// Body comparing to const double epsilon_squared, double3 ai) { // ipos -> position (and mass) of body i // jpos -> position (and mass) of body j // epsilon_squared -> softening factor // ai -> acceleration of body i to update // Compute the Denominator of the Acceleration Update double rx = jpos.x - ipos.x; double ry = jpos.y - ipos.y; double rz = jpos.z - ipos.z; double r2 = rx * rx + ry * ry + rz * rz + epsilon_squared; double r_6 = r2 * r2 * r2; double directionless_ai = jpos.w * rsqrt(r_6); // Compute the change in acceleration: ai.x += rx * directionless_ai; ai.y += ry * directionless_ai; ai.z += rz * directionless_ai; return ai; } // Computes Velocity given Acceleration for a single body // Given time step, velocity, and acceleration for that body __device__ double3 findV(double3 vel, double3 acc, const double dt) { // update velocity vel.x += acc.x * dt; vel.y += acc.y * dt; vel.z += acc.z * dt; return vel; } // Computes the acceleration changes to all bodies for a given time step. // Implements a tiling approach in order to achieve shared memory speedups. 
__global__ void tileForceBodies(double4* pos, double3 *vel, double3 *acc, const double epsilon_squared, const double dt, const int particle_n) { int global_tid = blockIdx.x * blockDim.x + threadIdx.x; int local_tid = threadIdx.x; if (global_tid < particle_n) { // This thread's information: double4 this_pos = pos[global_tid]; // w (mass), x, y, z double3 this_vel = vel[global_tid]; // current body's velocity double3 this_acc; // current acceleration (set via N-body computation) this_acc.x = 0.0; this_acc.y = 0.0; this_acc.z = 0.0; // 1 x blockDim shared memory extern __shared__ double4 bodyData[]; // Load shared memory #pragma unroll 4 for(int i = 0; i < particle_n; i+=blockDim.x) { // divides particles into N/blockDim chunks // load position values for blockDim particles into shared memory: bodyData[local_tid] = pos[i + local_tid]; // move blockDim slots ahead on each outer loop execution __syncthreads(); // Calculate interactions between current body & all bodies j in the domain j ∈ [i, i + blockDim) #pragma unroll 32 for(int j = 0; j < blockDim.x; j++) { double4 jpos = bodyData[j]; this_acc = findAccel(this_pos, jpos, epsilon_squared, this_acc); } __syncthreads(); } // Find velocity this_vel = findV(this_vel, this_acc, dt); // write back to global memory: acc[global_tid] = this_acc; vel[global_tid] = this_vel; } } // Kernel Function to update the positions of all bodies once acceleration update has finished __global__ void updatePositions(double4* pos, double3* vel, const double dt){ int global_tid = blockIdx.x * blockDim.x + threadIdx.x; pos[global_tid].x += vel[global_tid].x * dt; pos[global_tid].y += vel[global_tid].y * dt; pos[global_tid].z += vel[global_tid].z * dt; } ////Your implementations end here ////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////// ////Test function for n-body simulator ofstream out; ////////////////////////////////////////////////////////////////////////// ////Please do not change the values below const double dt=0.001; ////time step const int time_step_num=10; ////number of time steps const double epsilon=1e-2; ////epsilon added in the denominator to avoid 0-division when calculating the gravitational force const double epsilon_squared=epsilon*epsilon; ////epsilon squared ////We use grid_size=4 to help you debug your code, change it to a bigger number (e.g., 16, 32, etc.) 
to test the performance of your GPU code const unsigned int grid_size=4; ////assuming particles are initialized on a background grid const unsigned int particle_n=pow(grid_size,3); ////assuming each grid cell has one particle at the beginning // Thread Count is min of particle_n and 32 (so as not to spawn excess threads in the case of a small number of bodies) const unsigned int thread_count = min(particle_n, 32); //const unsigned int thread_count = 128; __host__ void Test_N_Body_Simulation() { ////initialize position, velocity, acceleration, and mass printf("Using %d threads per block\n", thread_count); printf("Using %d blocks\n\n", (int)ceil(double(particle_n)/double(thread_count))); double* pos_x=new double[particle_n]; double* pos_y=new double[particle_n]; double* pos_z=new double[particle_n]; ////initialize particle positions as the cell centers on a background grid double dx=1.0/(double)grid_size; for(unsigned int k=0;k<grid_size;k++){ for(unsigned int j=0;j<grid_size;j++){ for(unsigned int i=0;i<grid_size;i++){ unsigned int index=k*grid_size*grid_size+j*grid_size+i; pos_x[index]=dx*(double)i; pos_y[index]=dx*(double)j; pos_z[index]=dx*(double)k; } } } double* vel_x=new double[particle_n]; memset(vel_x,0x00,particle_n*sizeof(double)); double* vel_y=new double[particle_n]; memset(vel_y,0x00,particle_n*sizeof(double)); double* vel_z=new double[particle_n]; memset(vel_z,0x00,particle_n*sizeof(double)); double* acl_x=new double[particle_n]; memset(acl_x,0x00,particle_n*sizeof(double)); double* acl_y=new double[particle_n]; memset(acl_y,0x00,particle_n*sizeof(double)); double* acl_z=new double[particle_n]; memset(acl_z,0x00,particle_n*sizeof(double)); double* mass=new double[particle_n]; for(int i=0;i<particle_n;i++) { mass[i] = 100.0; } ////////////////////////////////////////////////////////////////////////// // Creating double values like CPU and moving to GPU double4* pos_host = new double4[particle_n]; double3* vel_host= new double3[particle_n]; double3* acl_host = new double3[particle_n]; // Set position and mass data in pos_gpu for(unsigned int k=0;k<grid_size;k++){ for(unsigned int j=0;j<grid_size;j++){ for(unsigned int i=0;i<grid_size;i++){ unsigned int index=k*grid_size*grid_size+j*grid_size+i; pos_host[index].x = dx*(double)i; pos_host[index].y = dx*(double)j; pos_host[index].z = dx*(double)k; } } } for(int i=0;i<particle_n;i++) { pos_host[i].w=100.0; } // set velocity and acceleration vectors to 0 for(int i=0; i<particle_n; i++){ vel_host[i].x = 0; vel_host[i].y = 0; vel_host[i].z = 0; } // Copy vectors over to GPU double4* pos_gpu; double3* vel_gpu; double3* acl_gpu; cudaMalloc((void**)&pos_gpu, particle_n * sizeof(double4)); cudaMalloc((void**)&vel_gpu, particle_n * sizeof(double3)); cudaMalloc((void**)&acl_gpu, particle_n * sizeof(double3)); cudaMemcpy(pos_gpu, pos_host, particle_n*sizeof(double4), cudaMemcpyHostToDevice); cudaMemcpy(vel_gpu, vel_host, particle_n*sizeof(double3), cudaMemcpyHostToDevice); ////////////////////////////////////////////////////////////////////////// ////Your implementation: n-body simulator on GPU cudaEvent_t start,end; cudaEventCreate(&start); cudaEventCreate(&end); float gpu_time=0.0f; cudaDeviceSynchronize(); cudaEventRecord(start); ////////////////////////////////////////////////////////////////////////// ////TODO 2: Your GPU functions are called here ////Requirement: You need to copy data from the CPU arrays, conduct computations on the GPU, and copy the values back from GPU to CPU ////The final positions should be stored in the same place 
as the CPU n-body function, i.e., pos_x, pos_y, pos_z ////The correctness of your simulation will be evaluated by comparing the results (positions) with the results calculated by the default CPU implementations ////////////////////////////////////////////////////////////////////////// int num_blocks = ceil((double)particle_n/(double)thread_count); cout<<"\nTotal number of particles: "<<particle_n<<endl; cout<<"Tracking the motion of particle "<<particle_n/2<<endl; cout<<"Print statements disabled "<<endl; // Step through time for(int i=0;i<time_step_num;i++){ // Here, we synchronize global memory before updating positions to avoid // Read after write conflicts for large values of N: tileForceBodies<<<num_blocks, thread_count, thread_count * sizeof(double4)>>>(pos_gpu, vel_gpu, acl_gpu, epsilon_squared, dt, particle_n); cudaDeviceSynchronize(); updatePositions<<<num_blocks, thread_count>>>(pos_gpu, vel_gpu, dt); cudaDeviceSynchronize(); // cudaMemcpy(pos_host, pos_gpu, particle_n*sizeof(double4), cudaMemcpyDeviceToHost); // cout<<"pos on timestep "<<i<<": "<<pos_host[particle_n/2].x<<", "<<pos_host[particle_n/2].y<<", "<<pos_host[particle_n/2].z<<endl; } cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&gpu_time,start,end); printf("\nGPU runtime: %.4f ms\n",gpu_time); cudaEventDestroy(start); cudaEventDestroy(end); ////////////////////////////////////////////////////////////////////////// cudaMemcpy(pos_host, pos_gpu, particle_n*sizeof(double4), cudaMemcpyDeviceToHost); out<<"R0: "<<pos_host[particle_n/2].x<<" " <<pos_host[particle_n/2].y<<" " <<pos_host[particle_n/2].z<<endl; out<<"T1: "<<gpu_time<<endl; } int main() { if(name::team=="Team_X"){ printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n"); return 0; } std::string file_name=name::team+"_competition_2_nbody.dat"; out.open(file_name.c_str()); if(out.fail()){ printf("\ncannot open file %s to record results\n",file_name.c_str()); return 0; } Test_N_Body_Simulation(); return 0; }
2852d0bf0ada01013a5cfb88aa8aeb9cbf130285.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * My heavily modified version of the NVIDIA transpose code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project //#include <cutil.h> #include "/opt/nvidia/cuda/common/inc/cutil.h" // includes, kernels #include <transpose_kernel_tom.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int size_a, const unsigned int size_b, const unsigned int size_c, const unsigned int size_d ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { // number of runs to average timing over int numIterations = 8; // size of the matrix #ifdef __DEVICE_EMULATION__ const unsigned int size_a = 4; const unsigned int size_b = 4; const unsigned int size_c = 4; const unsigned int size_d = 4; #else const unsigned int size = 8; const unsigned int size_a = size; const unsigned int size_b = size; const unsigned int size_c = size; const unsigned int size_d = size; #endif // size of memory required to store the matrix const unsigned int mem_size = sizeof(float) * size_a * size_b * size_c * size_d; unsigned int timer; cutCreateTimer(&timer); CUT_DEVICE_INIT(argc, argv); // allocate host memory float* h_idata = (float*) malloc(mem_size); // initalize the memory unsigned int num=0; for( unsigned int i = 0; i < (size_a); ++i){ for( unsigned int j = 0; j < (size_b); ++j){ for( unsigned int k = 0; k < (size_c); ++k){ for( unsigned int l = 0; l < (size_d); ++l){ h_idata[num++] = (float) (i + j*10 + k*100 + l*1000); } } } } // allocate device memory float* d_idata; float* d_odata; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size)); // copy host memory to device CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, mem_size, hipMemcpyHostToDevice) ); printf("Transposing a %d by %d by %d by %d matrix of floats...\n", size_a, size_b, size_c, size_d); // setup execution parameters dim3 dimBlock(8,8,8); dim3 dimGrid(size_a/dimBlock.x, size_b/dimBlock.y, size_c/dimBlock.z); // warmup so we don't time CUDA startup hipLaunchKernelGGL(( transpose_tom), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_odata, d_idata, size_a, size_b, size_c, size_d); // execute the kernel cutStartTimer(timer); for (int i = 0; i < numIterations; ++i){ hipLaunchKernelGGL(( transpose_tom), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_odata, d_idata, size_a, size_b, size_c, size_d); } hipDeviceSynchronize(); cutStopTimer(timer); float gpuTime = cutGetTimerValue(timer); printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host float* h_odata = (float*) malloc(mem_size); CUDA_SAFE_CALL( hipMemcpy( h_odata, d_odata, mem_size, hipMemcpyDeviceToHost) ); // compute reference solution float* reference = (float*) malloc( mem_size); // 
execute the kernel cutResetTimer(timer); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i){ computeGold( reference, h_idata, size_a, size_b, size_c, size_d); } cutStopTimer(timer); float cpuTime = cutGetTimerValue(timer); printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations); // check result if((size_a * size_b * size_c * size_d)<10001){ printf("==================================================================\n"); printf(" Initial Reference OutputData\n"); printf("==================================================================\n"); for( unsigned int i = 0; i < (size_a); ++i){ for( unsigned int j = 0; j < (size_b); ++j){ for( unsigned int k = 0; k < (size_c); ++k){ for( unsigned int l = 0; l < (size_d); ++l){ printf("%3d %3d %3d %3d", i, j, k, l); printf("%14.7f",h_idata[i+size_b*(j+size_c*(k+size_d*l))]); printf("%14.7f",reference[i+size_b*(j+size_c*(k+size_d*l))]); printf("%14.7f\n",h_odata[i+size_b*(j+size_c*(k+size_d*l))]); } } } } } CUTBoolean res = cutComparef( reference, h_odata, size_a * size_b * size_c * size_d); printf("==================================================================\n"); printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("==================================================================\n"); printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations); printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations); printf("Averaged over %d runs\n", numIterations); // cleanup memory free(h_idata); free(h_odata); free( reference); CUDA_SAFE_CALL(hipFree(d_idata)); CUDA_SAFE_CALL(hipFree(d_odata)); CUT_SAFE_CALL(cutDeleteTimer(timer)); }
2852d0bf0ada01013a5cfb88aa8aeb9cbf130285.cu
/* * My heavily modified version of the NVIDIA transpose code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project //#include <cutil.h> #include "/opt/nvidia/cuda/common/inc/cutil.h" // includes, kernels #include <transpose_kernel_tom.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int size_a, const unsigned int size_b, const unsigned int size_c, const unsigned int size_d ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { // number of runs to average timing over int numIterations = 8; // size of the matrix #ifdef __DEVICE_EMULATION__ const unsigned int size_a = 4; const unsigned int size_b = 4; const unsigned int size_c = 4; const unsigned int size_d = 4; #else const unsigned int size = 8; const unsigned int size_a = size; const unsigned int size_b = size; const unsigned int size_c = size; const unsigned int size_d = size; #endif // size of memory required to store the matrix const unsigned int mem_size = sizeof(float) * size_a * size_b * size_c * size_d; unsigned int timer; cutCreateTimer(&timer); CUT_DEVICE_INIT(argc, argv); // allocate host memory float* h_idata = (float*) malloc(mem_size); // initalize the memory unsigned int num=0; for( unsigned int i = 0; i < (size_a); ++i){ for( unsigned int j = 0; j < (size_b); ++j){ for( unsigned int k = 0; k < (size_c); ++k){ for( unsigned int l = 0; l < (size_d); ++l){ h_idata[num++] = (float) (i + j*10 + k*100 + l*1000); } } } } // allocate device memory float* d_idata; float* d_odata; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size)); // copy host memory to device CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) ); printf("Transposing a %d by %d by %d by %d matrix of floats...\n", size_a, size_b, size_c, size_d); // setup execution parameters dim3 dimBlock(8,8,8); dim3 dimGrid(size_a/dimBlock.x, size_b/dimBlock.y, size_c/dimBlock.z); // warmup so we don't time CUDA startup transpose_tom<<< dimGrid, dimBlock >>>(d_odata, d_idata, size_a, size_b, size_c, size_d); // execute the kernel cutStartTimer(timer); for (int i = 0; i < numIterations; ++i){ transpose_tom<<< dimGrid, dimBlock >>>(d_odata, d_idata, size_a, size_b, size_c, size_d); } cudaThreadSynchronize(); cutStopTimer(timer); float gpuTime = cutGetTimerValue(timer); printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host float* h_odata = (float*) malloc(mem_size); CUDA_SAFE_CALL( cudaMemcpy( h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost) ); // compute reference solution float* reference = (float*) malloc( mem_size); // execute the kernel cutResetTimer(timer); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i){ computeGold( reference, h_idata, size_a, size_b, 
size_c, size_d); } cutStopTimer(timer); float cpuTime = cutGetTimerValue(timer); printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations); // check result if((size_a * size_b * size_c * size_d)<10001){ printf("==================================================================\n"); printf(" Initial Reference OutputData\n"); printf("==================================================================\n"); for( unsigned int i = 0; i < (size_a); ++i){ for( unsigned int j = 0; j < (size_b); ++j){ for( unsigned int k = 0; k < (size_c); ++k){ for( unsigned int l = 0; l < (size_d); ++l){ printf("%3d %3d %3d %3d", i, j, k, l); printf("%14.7f",h_idata[i+size_b*(j+size_c*(k+size_d*l))]); printf("%14.7f",reference[i+size_b*(j+size_c*(k+size_d*l))]); printf("%14.7f\n",h_odata[i+size_b*(j+size_c*(k+size_d*l))]); } } } } } CUTBoolean res = cutComparef( reference, h_odata, size_a * size_b * size_c * size_d); printf("==================================================================\n"); printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); printf("==================================================================\n"); printf("GPU transpose average time: %0.3f ms\n", gpuTime / numIterations); printf("CPU transpose average time: %0.3f ms\n", cpuTime / numIterations); printf("Averaged over %d runs\n", numIterations); // cleanup memory free(h_idata); free(h_odata); free( reference); CUDA_SAFE_CALL(cudaFree(d_idata)); CUDA_SAFE_CALL(cudaFree(d_odata)); CUT_SAFE_CALL(cutDeleteTimer(timer)); }
16c2435f3bd397bc1ae4f0c882d6c11adb46f484.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> __global__ void K(int *p) { *p = 0; printf("%d\n", *p); } int main() { int *x, *y; hipMalloc(&x, sizeof(int)); hipLaunchKernelGGL(( K), dim3(2), dim3(10), 0, 0, x); hipDeviceSynchronize(); y = x; hipFree(y); // Note: x still aliases the allocation freed via y, so the second launch writes to freed memory; the commented-out error check below suggests this is deliberate. hipLaunchKernelGGL(( K), dim3(2), dim3(10), 0, 0, x); hipDeviceSynchronize(); //hipError_t err = hipGetLastError(); //printf("error=%d, %s, %s\n", err, hipGetErrorName(err), hipGetErrorString(err)); return 0; }
16c2435f3bd397bc1ae4f0c882d6c11adb46f484.cu
#include <cuda.h> #include <stdio.h> __global__ void K(int *p) { *p = 0; printf("%d\n", *p); } int main() { int *x, *y; cudaMalloc(&x, sizeof(int)); K<<<2, 10>>>(x); cudaDeviceSynchronize(); y = x; cudaFree(y); // Note: x still aliases the allocation freed via y, so the second launch writes to freed memory; the commented-out error check below suggests this is deliberate. K<<<2, 10>>>(x); cudaDeviceSynchronize(); //cudaError_t err = cudaGetLastError(); //printf("error=%d, %s, %s\n", err, cudaGetErrorName(err), cudaGetErrorString(err)); return 0; }
588cd8ad3d3be2ec205b90edc4c089454204a5fb.hip
// !!! This is a file automatically generated by hipify!!! /// LSU EE 7722 (Spring 2014), GPU Microarchitecture // /// Homework 2 SOLUTION (Spring 2014) // // Assignment in: http://www.ece.lsu.edu/koppel/gp/2014/hw01.pdf // and http://www.ece.lsu.edu/koppel/gp/2014/hw02.pdf // #include <pthread.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <hip/hip_runtime.h> #include "util.h" // The unroll degree is used by kernels dots_iterate1 and dots_iterate2. // const int unroll_degree = 8; struct App_Common { float2 *a; float *b; float *b_check; float2 *d_a; float *d_b; float v0, v1, v2; int array_size; int num_threads; }; // In host address space. App_Common app; // In device constant address space. __constant__ App_Common dapp; extern "C" __global__ void dots_iterate0() { const int thread_count = blockDim.x * gridDim.x; const int idx_start = threadIdx.x + blockIdx.x * blockDim.x; // For convenience, assign to local variables. float2* const a = dapp.d_a; float* const b = dapp.d_b; for ( int idx = idx_start; idx < dapp.array_size; idx += thread_count ) b[idx] = dapp.v0 + dapp.v1 * a[idx].x + dapp.v2 * a[idx].y; } extern "C" __global__ void dots_iterate1() { const int thread_count = blockDim.x * gridDim.x; const int idx_start = threadIdx.x + blockIdx.x * blockDim.x; // For convenience, assign to local variables. float2* const a = dapp.d_a; float* const b = dapp.d_b; for ( int idx = idx_start; idx < dapp.array_size; idx += unroll_degree * thread_count ) { float keep[unroll_degree]; for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * thread_count; if ( idx2 < dapp.array_size ) keep[i] = dapp.v0 + dapp.v1 * a[idx2].x + dapp.v2 * a[idx2].y; } for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * thread_count; if ( idx2 < dapp.array_size ) b[idx2] = keep[i]; } } } extern "C" __global__ void dots_iterate2() { /// Homework 2 SOLUTION const int wp_lg = 5; const int warp_size = 1 << wp_lg; int thread_count = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; const int lane = threadIdx.x & ( warp_size - 1 ); const int wp_num = tid >> wp_lg; const int idx_start = wp_num * warp_size * unroll_degree + lane; // For convenience, assign to local variables. float2* const a = dapp.d_a; float * const b = dapp.d_b; for ( int idx = idx_start; idx < dapp.array_size; idx += unroll_degree * thread_count ) { float keep[unroll_degree]; for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * warp_size; keep[i] = dapp.v0 + dapp.v1 * a[idx2].x + dapp.v2 * a[idx2].y; } for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * warp_size; b[idx2] = keep[i]; } } } GPU_Info print_gpu_info() { GPU_Info info; // Get information about GPU and its ability to run CUDA. // int device_count; CE( hipGetDeviceCount(&device_count) ); // Get number of GPUs. if ( device_count == 0 ) { fprintf(stderr,"No GPU found, exiting.\n"); exit(1); } /// Print information about the available GPUs. // for ( int dev=0; dev<device_count; dev++ ) { info.get_gpu_info(dev); // Look in file util.h. 
hipDeviceProp_t& cuda_prop = info.cuda_prop; printf ("GPU %d: %s @ %.2f GHz WITH %d MiB GLOBAL MEM\n", dev, cuda_prop.name, cuda_prop.clockRate/1e6, int(cuda_prop.totalGlobalMem >> 20)); printf ("GPU %d: CC: %d.%d MP: %2d CC/MP: %3d TH/BL: %4d\n", dev, cuda_prop.major, cuda_prop.minor, cuda_prop.multiProcessorCount, info.cc_per_mp, cuda_prop.maxThreadsPerBlock); printf ("GPU %d: SHARED: %5d B CONST: %5d B # REGS: %5d\n", dev, int(cuda_prop.sharedMemPerBlock), int(cuda_prop.totalConstMem), cuda_prop.regsPerBlock); printf ("GPU %d: L2: %d kiB MEM to L2: %.1f GB/s SP %.1f GFLOPS " "OP/ELT %.2f\n", dev, cuda_prop.l2CacheSize >> 10, info.chip_bw_Bps * 1e-9, info.chip_sp_flops * 1e-9, 4 * info.chip_sp_flops / info.chip_bw_Bps); } // Choose GPU 0 because it's usually the better choice. // int dev = 0; CE(hipSetDevice(dev)); printf("Using GPU %d\n",dev); info.get_gpu_info(dev); info.GET_INFO(dots_iterate0); info.GET_INFO(dots_iterate1); info.GET_INFO(dots_iterate2); // Print information about kernel. // printf("\nCUDA Kernel Resource Usage:\n"); for ( int i=0; i<info.num_kernels; i++ ) { printf("For %s:\n", info.ki[i].name); printf(" %6zd shared, %zd const, %zd loc, %d regs; " "%d max threads per block.\n", info.ki[i].cfa.sharedSizeBytes, info.ki[i].cfa.constSizeBytes, info.ki[i].cfa.localSizeBytes, info.ki[i].cfa.numRegs, info.ki[i].cfa.maxThreadsPerBlock); } return info; } int main(int argc, char **argv) { // Get info about GPU and each kernel. // GPU_Info info = print_gpu_info(); // Examine argument 1, grid size. // const int arg1_int = argc < 2 ? info.cuda_prop.multiProcessorCount : atoi(argv[1]); const int num_blocks = abs(arg1_int); // Examine argument 2, size of array in MiB. Fractional values okay. // app.array_size = argc < 3 ? 1 << 20 : int( atof(argv[2]) * (1<<20) ); if ( num_blocks <= 0 || app.array_size <= 0 ) { printf("Usage: %s [ NUM_CUDA_BLOCKS ] [DATA_SIZE_MiB]\n", argv[0]); exit(1); } const int a_size_bytes = app.array_size * sizeof(app.a[0]); const int b_size_bytes = app.array_size * sizeof(app.b[0]); // Allocate storage for CPU copy of data. // app.a = new float2[app.array_size]; app.b = new float[app.array_size]; app.b_check = new float[app.array_size]; const int overrun_size = 256; // Allocate storage for GPU copy of data. // CE( hipMalloc( &app.d_a, (app.array_size+overrun_size)*sizeof(app.a[0]) ) ); CE( hipMalloc( &app.d_b, (app.array_size+overrun_size)*sizeof(app.b[0]) ) ); printf ("\nPreparing for %d blocks operating on %d elements. Unroll degree: %d.\n", num_blocks, app.array_size, unroll_degree); // Initialize input array. // for ( int i=0; i<app.array_size; i++ ) { app.a[i].x = drand48(); app.a[i].y = drand48(); } // Initialize Coefficients // app.v0 = drand48(); app.v1 = drand48(); app.v2 = drand48(); // Compute correct answer (for checking). // for ( int e=0; e<app.array_size; e++ ) app.b_check[e] = app.v0 + app.v1 * app.a[e].x + app.v2 * app.a[e].y; // Prepare events used for timing. // hipEvent_t gpu_start_ce, gpu_stop_ce; CE(hipEventCreate(&gpu_start_ce)); CE(hipEventCreate(&gpu_stop_ce)); // Copy input array from CPU to GPU. // CE( hipMemcpy( app.d_a, app.a, a_size_bytes, hipMemcpyHostToDevice ) ); const double data_size = app.array_size * ( sizeof(app.a[0]) + sizeof(app.b[0]) ); // Stuff needed to print bar graph. // const int full_width = 49; // Width of longest bar. 
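// Note: each kernel is assigned a bar string filled with its own digit ('0' + kernel index); the suffix printed for each run is roughly full_width * (achieved bandwidth / peak bandwidth) characters long, so a longer bar means better memory-bandwidth utilization.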
char **stars = (char**) alloca( info.num_kernels * sizeof(void*) ); for ( int j=0; j< info.num_kernels; j++ ) { stars[j] = (char*) alloca(full_width+1); for ( int i=0; i<full_width; i++ ) stars[j][i] = '0' + j; stars[j][full_width] = 0; } for ( int kernel = 0; kernel < info.num_kernels; kernel++ ) { hipFuncAttributes& cfa = info.ki[kernel].cfa; if ( kernel == 2 && cfa.numRegs < 3 ) { printf("Not running kernel %s because code probably not present.\n", info.ki[kernel].name); continue; } // Maximum number of warps per block that this GPU can handle // for this kernel. // const int wp_limit = cfa.maxThreadsPerBlock >> 5; printf("\nRunning kernel %s which uses %d regs on %d blocks.", info.ki[kernel].name, cfa.numRegs, num_blocks); if ( kernel ) printf(" Unroll degree %d\n", unroll_degree); else printf("\n"); printf(" %3s %9s %6s %5s\n", "Num", "Time", "Data", "" ); printf(" %3s %9s %6s %5s\n", "Wps", "µs", " GB/s", "Pct" ); for ( int warp_cnt = 1; warp_cnt <= wp_limit; warp_cnt++ ) { const int thd_per_block = warp_cnt << 5; app.num_threads = num_blocks * thd_per_block; // Copy App structure to GPU. // CE( hipMemcpyToSymbol ( dapp, &app, sizeof(app), 0, hipMemcpyHostToDevice ) ); // Zero result array (to catch errors when kernel skips elements). // CE( hipMemset( app.d_b, 0, b_size_bytes ) ); // Measure execution time starting "now", which is after data // set to GPU. // CE(hipEventRecord(gpu_start_ce,0)); // Launch Kernel // hipLaunchKernelGGL(info.ki[kernel].func_ptr, dim3(num_blocks), dim3(thd_per_block), 0, 0); // Stop measuring execution time now, which is before data is // returned from GPU. // CE(hipEventRecord(gpu_stop_ce,0)); CE(hipEventSynchronize(gpu_stop_ce)); float cuda_time_ms = -1.1; CE(hipEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce)); const double elapsed_time_s = cuda_time_ms * 0.001; // Copy output array from GPU to CPU. // CE( hipMemcpy (app.b, app.d_b, b_size_bytes, hipMemcpyDeviceToHost) ); int err = 0; for ( int e=0; e<app.array_size; e++ ) { const float diff = fabs( app.b[e] - app.b_check[e] ); if ( diff > 0.00001 ) { err++; if ( err < 2 ) printf ("Error at array idx %d: %f != %f (correct)\n", e, app.b[e], app.b_check[e]); } } const double frac = data_size / elapsed_time_s / info.chip_bw_Bps; const int s_idx = max(0.0,full_width * ( 1 - frac )); printf(" %2d %9.3f %6.2f %5.1f%% %s\n", warp_cnt, 1e6 * elapsed_time_s, 1e-9 * data_size / elapsed_time_s, 100 * frac, &stars[kernel][s_idx] ); } } }
588cd8ad3d3be2ec205b90edc4c089454204a5fb.cu
/// LSU EE 7722 (Spring 2014), GPU Microarchitecture // /// Homework 2 SOLUTION (Spring 2014) // // Assignment in: http://www.ece.lsu.edu/koppel/gp/2014/hw01.pdf // and http://www.ece.lsu.edu/koppel/gp/2014/hw02.pdf // #include <pthread.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <ctype.h> #include <time.h> #include <new> #include <cuda_runtime.h> #include "util.h" // The unroll degree is used by kernels dots_iterate1 and dots_iterate2. // const int unroll_degree = 8; struct App_Common { float2 *a; float *b; float *b_check; float2 *d_a; float *d_b; float v0, v1, v2; int array_size; int num_threads; }; // In host address space. App_Common app; // In device constant address space. __constant__ App_Common dapp; extern "C" __global__ void dots_iterate0() { const int thread_count = blockDim.x * gridDim.x; const int idx_start = threadIdx.x + blockIdx.x * blockDim.x; // For convenience, assign to local variables. float2* const a = dapp.d_a; float* const b = dapp.d_b; for ( int idx = idx_start; idx < dapp.array_size; idx += thread_count ) b[idx] = dapp.v0 + dapp.v1 * a[idx].x + dapp.v2 * a[idx].y; } extern "C" __global__ void dots_iterate1() { const int thread_count = blockDim.x * gridDim.x; const int idx_start = threadIdx.x + blockIdx.x * blockDim.x; // For convenience, assign to local variables. float2* const a = dapp.d_a; float* const b = dapp.d_b; for ( int idx = idx_start; idx < dapp.array_size; idx += unroll_degree * thread_count ) { float keep[unroll_degree]; for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * thread_count; if ( idx2 < dapp.array_size ) keep[i] = dapp.v0 + dapp.v1 * a[idx2].x + dapp.v2 * a[idx2].y; } for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * thread_count; if ( idx2 < dapp.array_size ) b[idx2] = keep[i]; } } } extern "C" __global__ void dots_iterate2() { /// Homework 2 SOLUTION const int wp_lg = 5; const int warp_size = 1 << wp_lg; int thread_count = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; const int lane = threadIdx.x & ( warp_size - 1 ); const int wp_num = tid >> wp_lg; const int idx_start = wp_num * warp_size * unroll_degree + lane; // For convenience, assign to local variables. float2* const a = dapp.d_a; float * const b = dapp.d_b; for ( int idx = idx_start; idx < dapp.array_size; idx += unroll_degree * thread_count ) { float keep[unroll_degree]; for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * warp_size; keep[i] = dapp.v0 + dapp.v1 * a[idx2].x + dapp.v2 * a[idx2].y; } for ( int i=0; i<unroll_degree; i++ ) { const int idx2 = idx + i * warp_size; b[idx2] = keep[i]; } } } GPU_Info print_gpu_info() { GPU_Info info; // Get information about GPU and its ability to run CUDA. // int device_count; CE( cudaGetDeviceCount(&device_count) ); // Get number of GPUs. if ( device_count == 0 ) { fprintf(stderr,"No GPU found, exiting.\n"); exit(1); } /// Print information about the available GPUs. // for ( int dev=0; dev<device_count; dev++ ) { info.get_gpu_info(dev); // Look in file util.h. 
cudaDeviceProp& cuda_prop = info.cuda_prop; printf ("GPU %d: %s @ %.2f GHz WITH %d MiB GLOBAL MEM\n", dev, cuda_prop.name, cuda_prop.clockRate/1e6, int(cuda_prop.totalGlobalMem >> 20)); printf ("GPU %d: CC: %d.%d MP: %2d CC/MP: %3d TH/BL: %4d\n", dev, cuda_prop.major, cuda_prop.minor, cuda_prop.multiProcessorCount, info.cc_per_mp, cuda_prop.maxThreadsPerBlock); printf ("GPU %d: SHARED: %5d B CONST: %5d B # REGS: %5d\n", dev, int(cuda_prop.sharedMemPerBlock), int(cuda_prop.totalConstMem), cuda_prop.regsPerBlock); printf ("GPU %d: L2: %d kiB MEM to L2: %.1f GB/s SP %.1f GFLOPS " "OP/ELT %.2f\n", dev, cuda_prop.l2CacheSize >> 10, info.chip_bw_Bps * 1e-9, info.chip_sp_flops * 1e-9, 4 * info.chip_sp_flops / info.chip_bw_Bps); } // Choose GPU 0 because it's usually the better choice. // int dev = 0; CE(cudaSetDevice(dev)); printf("Using GPU %d\n",dev); info.get_gpu_info(dev); info.GET_INFO(dots_iterate0); info.GET_INFO(dots_iterate1); info.GET_INFO(dots_iterate2); // Print information about kernel. // printf("\nCUDA Kernel Resource Usage:\n"); for ( int i=0; i<info.num_kernels; i++ ) { printf("For %s:\n", info.ki[i].name); printf(" %6zd shared, %zd const, %zd loc, %d regs; " "%d max threads per block.\n", info.ki[i].cfa.sharedSizeBytes, info.ki[i].cfa.constSizeBytes, info.ki[i].cfa.localSizeBytes, info.ki[i].cfa.numRegs, info.ki[i].cfa.maxThreadsPerBlock); } return info; } int main(int argc, char **argv) { // Get info about GPU and each kernel. // GPU_Info info = print_gpu_info(); // Examine argument 1, grid size. // const int arg1_int = argc < 2 ? info.cuda_prop.multiProcessorCount : atoi(argv[1]); const int num_blocks = abs(arg1_int); // Examine argument 2, size of array in MiB. Fractional values okay. // app.array_size = argc < 3 ? 1 << 20 : int( atof(argv[2]) * (1<<20) ); if ( num_blocks <= 0 || app.array_size <= 0 ) { printf("Usage: %s [ NUM_CUDA_BLOCKS ] [DATA_SIZE_MiB]\n", argv[0]); exit(1); } const int a_size_bytes = app.array_size * sizeof(app.a[0]); const int b_size_bytes = app.array_size * sizeof(app.b[0]); // Allocate storage for CPU copy of data. // app.a = new float2[app.array_size]; app.b = new float[app.array_size]; app.b_check = new float[app.array_size]; const int overrun_size = 256; // Allocate storage for GPU copy of data. // CE( cudaMalloc( &app.d_a, (app.array_size+overrun_size)*sizeof(app.a[0]) ) ); CE( cudaMalloc( &app.d_b, (app.array_size+overrun_size)*sizeof(app.b[0]) ) ); printf ("\nPreparing for %d blocks operating on %d elements. Unroll degree: %d.\n", num_blocks, app.array_size, unroll_degree); // Initialize input array. // for ( int i=0; i<app.array_size; i++ ) { app.a[i].x = drand48(); app.a[i].y = drand48(); } // Initialize Coefficients // app.v0 = drand48(); app.v1 = drand48(); app.v2 = drand48(); // Compute correct answer (for checking). // for ( int e=0; e<app.array_size; e++ ) app.b_check[e] = app.v0 + app.v1 * app.a[e].x + app.v2 * app.a[e].y; // Prepare events used for timing. // cudaEvent_t gpu_start_ce, gpu_stop_ce; CE(cudaEventCreate(&gpu_start_ce)); CE(cudaEventCreate(&gpu_stop_ce)); // Copy input array from CPU to GPU. // CE( cudaMemcpy( app.d_a, app.a, a_size_bytes, cudaMemcpyHostToDevice ) ); const double data_size = app.array_size * ( sizeof(app.a[0]) + sizeof(app.b[0]) ); // Stuff needed to print bar graph. // const int full_width = 49; // Width of longest bar. 
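// Note: each kernel is assigned a bar string filled with its own digit ('0' + kernel index); the suffix printed for each run is roughly full_width * (achieved bandwidth / peak bandwidth) characters long, so a longer bar means better memory-bandwidth utilization.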
char **stars = (char**) alloca( info.num_kernels * sizeof(void*) ); for ( int j=0; j< info.num_kernels; j++ ) { stars[j] = (char*) alloca(full_width+1); for ( int i=0; i<full_width; i++ ) stars[j][i] = '0' + j; stars[j][full_width] = 0; } for ( int kernel = 0; kernel < info.num_kernels; kernel++ ) { cudaFuncAttributes& cfa = info.ki[kernel].cfa; if ( kernel == 2 && cfa.numRegs < 3 ) { printf("Not running kernel %s because code probably not present.\n", info.ki[kernel].name); continue; } // Maximum number of warps per block that this GPU can handle // for this kernel. // const int wp_limit = cfa.maxThreadsPerBlock >> 5; printf("\nRunning kernel %s which uses %d regs on %d blocks.", info.ki[kernel].name, cfa.numRegs, num_blocks); if ( kernel ) printf(" Unroll degree %d\n", unroll_degree); else printf("\n"); printf(" %3s %9s %6s %5s\n", "Num", "Time", "Data", "" ); printf(" %3s %9s %6s %5s\n", "Wps", "µs", " GB/s", "Pct" ); for ( int warp_cnt = 1; warp_cnt <= wp_limit; warp_cnt++ ) { const int thd_per_block = warp_cnt << 5; app.num_threads = num_blocks * thd_per_block; // Copy App structure to GPU. // CE( cudaMemcpyToSymbol ( dapp, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) ); // Zero result array (to catch errors when kernel skips elements). // CE( cudaMemset( app.d_b, 0, b_size_bytes ) ); // Measure execution time starting "now", which is after data // set to GPU. // CE(cudaEventRecord(gpu_start_ce,0)); // Launch Kernel // info.ki[kernel].func_ptr<<<num_blocks,thd_per_block>>>(); // Stop measuring execution time now, which is before is data // returned from GPU. // CE(cudaEventRecord(gpu_stop_ce,0)); CE(cudaEventSynchronize(gpu_stop_ce)); float cuda_time_ms = -1.1; CE(cudaEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce)); const double elapsed_time_s = cuda_time_ms * 0.001; // Copy output array from GPU to CPU. // CE( cudaMemcpy (app.b, app.d_b, b_size_bytes, cudaMemcpyDeviceToHost) ); int err = 0; for ( int e=0; e<app.array_size; e++ ) { const float diff = fabs( app.b[e] - app.b_check[e] ); if ( diff > 0.00001 ) { err++; if ( err < 2 ) printf ("Error at array idx %d: %f != %f (correct)\n", e, app.b[e], app.b_check[e]); } } const double frac = data_size / elapsed_time_s / info.chip_bw_Bps; const int s_idx = max(0.0,full_width * ( 1 - frac )); printf(" %2d %9.3f %6.2f %5.1f%% %s\n", warp_cnt, 1e6 * elapsed_time_s, 1e-9 * data_size / elapsed_time_s, 100 * frac, &stars[kernel][s_idx] ); } } }
d93cbe666e492260bb535abe03dab8fcd40f2ad6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdexcept> #include <chrono> #include <Common/Cuda/CudaSafeCall.h> #include <Common/Cuda/cudaCalcMurmurHash64.h> #include <Common/Cuda/cudaCalcMurmurHash64.h> #include <Interpreters/Cuda/CudaStringsAggregator.h> //#include "hash_sort_alg_magic_numbers.h" using std::chrono::steady_clock; using std::chrono::duration_cast; using std::chrono::milliseconds; namespace DB { CudaStringsAggregator::CudaStringsAggregator(int dev_number_, size_t chunks_num_, UInt32 hash_table_max_size_, UInt32 hash_table_str_buffer_max_size_, UInt32 buffer_max_str_num_, UInt32 buffer_max_size_, CudaAggregateFunctionPtr aggregate_function_) : dev_number(dev_number_), aggregate_function(aggregate_function_) { CUDA_SAFE_CALL(hipSetDevice(dev_number)); chunks.resize(chunks_num_); /// create cuda streams, allocate host and cuda buffers for (size_t i = 0;i < chunks.size();++i) { chunks[i] = WorkChunkInfoPtr(new WorkChunkInfo()); CUDA_SAFE_CALL(hipStreamCreate( &chunks[i]->stream )); //chunks[i]->stream = cudaStreamPerThread; chunks[i]->cuda_hash_table = CudaStringsHashTablePtr( new CudaStringsHashTable(hash_table_max_size_, hash_table_str_buffer_max_size_)); chunks[i]->cuda_buffer_keys = CudaColumnStringPtr( new CudaColumnString(buffer_max_str_num_, buffer_max_size_)); chunks[i]->cuda_buffer_vals = CudaColumnStringPtr( new CudaColumnString(buffer_max_str_num_, buffer_max_size_)); chunks[i]->host_buffer_agg_res_keys = CudaHostStringsBufferPtr( new CudaHostStringsBuffer(buffer_max_str_num_, buffer_max_size_)); chunks[i]->group_nums.resize(buffer_max_str_num_); chunks[i]->group_agg_res = CudaArrayPtr<char>( new CudaArray<char>(hash_table_max_size_ * aggregate_function->cudaSizeOfData())); chunks[i]->host_group_agg_res = CudaHostPinnedArrayPtr<char>( new CudaHostPinnedArray<char>(hash_table_max_size_ * aggregate_function->cudaSizeOfData())); chunks[i]->agg_tmp_buf = CudaArrayPtr<char>( new CudaArray<char>( aggregate_function->cudaSizeOfAddBulkInternalBuf(buffer_max_str_num_) )); } CUDA_SAFE_CALL(hipStreamCreate( &copy_stream )); std::cout << "CudaStringsAggregator created" << std::endl; /*chunks.resize(1); chunks[0] = WorkChunkInfoPtr(new WorkChunkInfo()); chunks[0]->host_group_agg_res = CudaHostPinnedArrayPtr<char>( new CudaHostPinnedArray<char>(10 * aggregate_function->cudaSizeOfData()));*/ } struct ProcessChunkParams { ProcessChunkParams(CudaStringsAggregator *agg_, size_t i_) : agg(agg_),i(i_) { } CudaStringsAggregator *agg; size_t i; }; void callProcessChunk(ProcessChunkParams params) { params.agg->processChunk(params.i); } void CudaStringsAggregator::startProcessing() { /// start processing threads for (size_t i = 0;i < chunks.size();++i) { chunks[i]->cuda_processing_state = false; //chunks[i]->t = std::thread(&CudaStringsAggregator::processChunk, this, i); chunks[i]->t = std::thread(callProcessChunk, ProcessChunkParams(this, i)); } /// set current buffer for data appending curr_filling_chunk = 0; is_vals_needed = aggregate_function->isDataNeeded(); } void CudaStringsAggregator::queueData(size_t str_num, size_t str_buf_sz, const char *str_buf, const OffsetType *offsets, size_t vals_str_buf_sz, const char *vals_str_buf, const OffsetType *vals_offsets) { //printf("CudaStringsAggregator::queueData: params %d %d %d %d\n", str_num, str_buf_sz, vals_str_buf_sz, memcpy_threads_num_); while(1) { if (tryQueueData(str_num, str_buf_sz, str_buf, offsets, vals_str_buf_sz, vals_str_buf, vals_offsets)) return; } } void 
CudaStringsAggregator::waitQueueData()const { CUDA_SAFE_CALL( hipStreamSynchronize ( copy_stream ) ); } void CudaStringsAggregator::waitProcessed() { { std::unique_lock<std::mutex> lck( chunks[curr_filling_chunk]->cuda_buffer_mtx ); chunks[curr_filling_chunk]->cv_cuda_processing_end.wait( lck, [this]{return !chunks[curr_filling_chunk]->cuda_processing_state;} ); if (!chunks[curr_filling_chunk]->cuda_buffer_keys->empty()) { chunks[curr_filling_chunk]->cuda_processing_state = true; chunks[curr_filling_chunk]->cv_buffer_append_end.notify_one(); } } /// wait till host to gpu copy ends, 'send' empty buffer to signal end of data for (size_t i = 0;i < chunks.size();++i) { std::unique_lock<std::mutex> lck( chunks[i]->cuda_buffer_mtx ); chunks[i]->cv_cuda_processing_end.wait( lck, [this,i]{return !chunks[i]->cuda_processing_state;} ); if (!chunks[i]->cuda_buffer_keys->empty()) throw std::logic_error("CudaStringsAggregator: host buffer is not empty after transfer"); //setting cuda_processing_state with empty buffer means end of processing chunks[i]->cuda_processing_state = true; chunks[i]->cv_buffer_append_end.notify_one(); } /// wait for processes for termination for (size_t i = 0;i < chunks.size();++i) { chunks[i]->t.join(); } /// combine data from different chunks for (size_t i = 1;i < chunks.size();++i) { chunks[i]->cuda_hash_table->mergeToOtherTable(chunks[0]->cuda_hash_table, thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[0]->stream); aggregate_function->cudaMergeBulk(chunks[0]->group_agg_res->getData(), chunks[i]->cuda_hash_table->getBucketsNum(), chunks[i]->group_agg_res->getData(), thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[0]->stream); } CUDA_SAFE_CALL( hipMemcpyAsync ( chunks[0]->host_buffer_agg_res_keys->getBuf(), chunks[0]->cuda_hash_table->getStrBuf(), chunks[0]->cuda_hash_table->getStrBufSz(), hipMemcpyDeviceToHost, chunks[0]->stream ) ); /// TODO get rid of sizeof(UInt32) in all following hipMemcpyAsync!! 
CUDA_SAFE_CALL( hipMemcpyAsync ( chunks[0]->host_buffer_agg_res_keys->getLens(), chunks[0]->cuda_hash_table->getLens(), chunks[0]->cuda_hash_table->getBucketsNum()*sizeof(UInt32), hipMemcpyDeviceToHost, chunks[0]->stream ) ); CUDA_SAFE_CALL( hipMemcpyAsync ( chunks[0]->host_buffer_agg_res_keys->getOffsets(), chunks[0]->cuda_hash_table->getOffsets(), chunks[0]->cuda_hash_table->getBucketsNum()*sizeof(UInt32), hipMemcpyDeviceToHost, chunks[0]->stream ) ); CUDA_SAFE_CALL( hipMemcpyAsync ( chunks[0]->host_group_agg_res->getData(), chunks[0]->group_agg_res->getData(), chunks[0]->cuda_hash_table->getBucketsNum()*aggregate_function->cudaSizeOfData(), hipMemcpyDeviceToHost, chunks[0]->stream ) ); CUDA_SAFE_CALL( hipStreamSynchronize ( chunks[0]->stream ) ); auto host_e1 = steady_clock::now(); for (size_t j = 0;j < chunks[0]->cuda_hash_table->getBucketsNum();++j) { UInt32 len = chunks[0]->host_buffer_agg_res_keys->getLens()[j], offset = chunks[0]->host_buffer_agg_res_keys->getOffsets()[j]; if (len == DBMS_CUDA_EMPTY_LEN_VAL) continue; std::string key_str(chunks[0]->host_buffer_agg_res_keys->getBuf() + offset, len-1); CudaAggregateDataPtr res = chunks[0]->host_group_agg_res->getData() + j*aggregate_function->cudaSizeOfData(); auto it = chunks[0]->agg_result.find(key_str); if (it == chunks[0]->agg_result.end()) { chunks[0]->agg_result[key_str] = res; } else { throw std::logic_error("CudaStringsAggregator::waitProcessed: seems there are duplicates in GPU table"); } } auto host_e2 = steady_clock::now(); auto host_t = duration_cast<milliseconds>(host_e2 - host_e1); std::cout << "CudaStringsAggregator::waitProcessed: time for placing data into cpu hash table " << host_t.count() << "ms" << std::endl; } bool CudaStringsAggregator::tryQueueData(size_t str_num, size_t str_buf_sz, const char *str_buf, const OffsetType *offsets, size_t vals_str_buf_sz, const char *vals_str_buf, const OffsetType *vals_offsets) { std::unique_lock<std::mutex> lck( chunks[curr_filling_chunk]->cuda_buffer_mtx ); chunks[curr_filling_chunk]->cv_cuda_processing_end.wait( lck, [this]{return !chunks[curr_filling_chunk]->cuda_processing_state;} ); if (chunks[curr_filling_chunk]->cuda_buffer_keys->hasSpace(str_num, str_buf_sz) && chunks[curr_filling_chunk]->cuda_buffer_vals->hasSpace(str_num, vals_str_buf_sz) ) { chunks[curr_filling_chunk]->cuda_buffer_keys->addData(str_num, str_buf_sz, str_buf, offsets, copy_stream); if (is_vals_needed) chunks[curr_filling_chunk]->cuda_buffer_vals->addData(str_num, vals_str_buf_sz, vals_str_buf, vals_offsets, copy_stream); return true; } else { if (chunks[curr_filling_chunk]->cuda_buffer_keys->empty()) throw std::runtime_error("CudaStringsAggregator: seems there is not enough space in buffer"); waitQueueData(); chunks[curr_filling_chunk]->cuda_processing_state = true; chunks[curr_filling_chunk]->cv_buffer_append_end.notify_one(); curr_filling_chunk = (curr_filling_chunk+1)%chunks.size(); return false; } } __global__ void kerFillMaxHash( UInt32 str_num, UInt32 max_str_num, UInt64 *hashes ) { UInt32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < str_num) return; if (!(i < max_str_num)) return; hashes[i] = 0xFFFFFFFFFFFFFFFF; } void CudaStringsAggregator::processChunk(size_t i) { CUDA_SAFE_CALL(hipSetDevice(dev_number)); chunks[i]->cuda_hash_table->erase(chunks[i]->stream); aggregate_function->cudaInitAggregateData(chunks[i]->cuda_hash_table->getBucketsNum(), chunks[i]->group_agg_res->getData(), chunks[i]->stream); while (1) { { std::cout << "CudaStringsAggregator::processChunk(i = " << i << "): waiting 
data..." << std::endl; std::unique_lock<std::mutex> lck( chunks[i]->cuda_buffer_mtx ); chunks[i]->cv_buffer_append_end.wait( lck, [this,i]{return chunks[i]->cuda_processing_state;} ); /// we agreed that empty buffer means end of processing if (chunks[i]->cuda_buffer_keys->empty()) break; std::cout << "CudaStringsAggregator::processChunk(i = " << i << "): calc Lengths" << std::endl; chunks[i]->cuda_buffer_keys->calcLengths( chunks[i]->stream ); if (is_vals_needed) chunks[i]->cuda_buffer_vals->calcLengths( chunks[i]->stream ); size_t str_num = chunks[i]->cuda_buffer_keys->getStrNum(); chunks[i]->cuda_hash_table->addData(str_num, chunks[i]->cuda_buffer_keys->getBuf(), chunks[i]->cuda_buffer_keys->getOffsets(), chunks[i]->cuda_buffer_keys->getLens(), thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[i]->stream); aggregate_function->cudaAddBulk(chunks[i]->group_agg_res->getData(), chunks[i]->cuda_buffer_vals, str_num, thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[i]->agg_tmp_buf->getData(), chunks[i]->stream); chunks[i]->cuda_buffer_keys->reset(); chunks[i]->cuda_buffer_vals->reset(); chunks[i]->cuda_processing_state = false; chunks[i]->cv_cuda_processing_end.notify_one(); } } chunks[i]->cuda_hash_table->calcOffsets(chunks[i]->stream); CUDA_SAFE_CALL( hipStreamSynchronize ( chunks[i]->stream ) ); } CudaStringsAggregator::~CudaStringsAggregator() { } }
d93cbe666e492260bb535abe03dab8fcd40f2ad6.cu
#include <stdexcept> #include <chrono> #include <Common/Cuda/CudaSafeCall.h> #include <Common/Cuda/cudaCalcMurmurHash64.h> #include <Common/Cuda/cudaCalcMurmurHash64.h> #include <Interpreters/Cuda/CudaStringsAggregator.h> //#include "hash_sort_alg_magic_numbers.h" using std::chrono::steady_clock; using std::chrono::duration_cast; using std::chrono::milliseconds; namespace DB { CudaStringsAggregator::CudaStringsAggregator(int dev_number_, size_t chunks_num_, UInt32 hash_table_max_size_, UInt32 hash_table_str_buffer_max_size_, UInt32 buffer_max_str_num_, UInt32 buffer_max_size_, CudaAggregateFunctionPtr aggregate_function_) : dev_number(dev_number_), aggregate_function(aggregate_function_) { CUDA_SAFE_CALL(cudaSetDevice(dev_number)); chunks.resize(chunks_num_); /// create cuda streams, allocate host and cuda buffers for (size_t i = 0;i < chunks.size();++i) { chunks[i] = WorkChunkInfoPtr(new WorkChunkInfo()); CUDA_SAFE_CALL(cudaStreamCreate( &chunks[i]->stream )); //chunks[i]->stream = cudaStreamPerThread; chunks[i]->cuda_hash_table = CudaStringsHashTablePtr( new CudaStringsHashTable(hash_table_max_size_, hash_table_str_buffer_max_size_)); chunks[i]->cuda_buffer_keys = CudaColumnStringPtr( new CudaColumnString(buffer_max_str_num_, buffer_max_size_)); chunks[i]->cuda_buffer_vals = CudaColumnStringPtr( new CudaColumnString(buffer_max_str_num_, buffer_max_size_)); chunks[i]->host_buffer_agg_res_keys = CudaHostStringsBufferPtr( new CudaHostStringsBuffer(buffer_max_str_num_, buffer_max_size_)); chunks[i]->group_nums.resize(buffer_max_str_num_); chunks[i]->group_agg_res = CudaArrayPtr<char>( new CudaArray<char>(hash_table_max_size_ * aggregate_function->cudaSizeOfData())); chunks[i]->host_group_agg_res = CudaHostPinnedArrayPtr<char>( new CudaHostPinnedArray<char>(hash_table_max_size_ * aggregate_function->cudaSizeOfData())); chunks[i]->agg_tmp_buf = CudaArrayPtr<char>( new CudaArray<char>( aggregate_function->cudaSizeOfAddBulkInternalBuf(buffer_max_str_num_) )); } CUDA_SAFE_CALL(cudaStreamCreate( &copy_stream )); std::cout << "CudaStringsAggregator created" << std::endl; /*chunks.resize(1); chunks[0] = WorkChunkInfoPtr(new WorkChunkInfo()); chunks[0]->host_group_agg_res = CudaHostPinnedArrayPtr<char>( new CudaHostPinnedArray<char>(10 * aggregate_function->cudaSizeOfData()));*/ } struct ProcessChunkParams { ProcessChunkParams(CudaStringsAggregator *agg_, size_t i_) : agg(agg_),i(i_) { } CudaStringsAggregator *agg; size_t i; }; void callProcessChunk(ProcessChunkParams params) { params.agg->processChunk(params.i); } void CudaStringsAggregator::startProcessing() { /// start processing threads for (size_t i = 0;i < chunks.size();++i) { chunks[i]->cuda_processing_state = false; //chunks[i]->t = std::thread(&CudaStringsAggregator::processChunk, this, i); chunks[i]->t = std::thread(callProcessChunk, ProcessChunkParams(this, i)); } /// set current buffer for data appending curr_filling_chunk = 0; is_vals_needed = aggregate_function->isDataNeeded(); } void CudaStringsAggregator::queueData(size_t str_num, size_t str_buf_sz, const char *str_buf, const OffsetType *offsets, size_t vals_str_buf_sz, const char *vals_str_buf, const OffsetType *vals_offsets) { //printf("CudaStringsAggregator::queueData: params %d %d %d %d\n", str_num, str_buf_sz, vals_str_buf_sz, memcpy_threads_num_); while(1) { if (tryQueueData(str_num, str_buf_sz, str_buf, offsets, vals_str_buf_sz, vals_str_buf, vals_offsets)) return; } } void CudaStringsAggregator::waitQueueData()const { CUDA_SAFE_CALL( cudaStreamSynchronize ( copy_stream ) ); } 
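/// waitProcessed(): flushes any partially filled key/value buffer, signals end-of-data to the per-chunk worker threads (an empty buffer marks termination), joins them, merges every chunk's hash table and aggregate states into chunk 0, then copies the merged keys and aggregate results back to pinned host memory and builds the CPU-side result map.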
void CudaStringsAggregator::waitProcessed() { { std::unique_lock<std::mutex> lck( chunks[curr_filling_chunk]->cuda_buffer_mtx ); chunks[curr_filling_chunk]->cv_cuda_processing_end.wait( lck, [this]{return !chunks[curr_filling_chunk]->cuda_processing_state;} ); if (!chunks[curr_filling_chunk]->cuda_buffer_keys->empty()) { chunks[curr_filling_chunk]->cuda_processing_state = true; chunks[curr_filling_chunk]->cv_buffer_append_end.notify_one(); } } /// wait till host to gpu copy ends, 'send' empty buffer to signal end of data for (size_t i = 0;i < chunks.size();++i) { std::unique_lock<std::mutex> lck( chunks[i]->cuda_buffer_mtx ); chunks[i]->cv_cuda_processing_end.wait( lck, [this,i]{return !chunks[i]->cuda_processing_state;} ); if (!chunks[i]->cuda_buffer_keys->empty()) throw std::logic_error("CudaStringsAggregator: host buffer is not empty after transfer"); //setting cuda_processing_state with empty buffer means end of processing chunks[i]->cuda_processing_state = true; chunks[i]->cv_buffer_append_end.notify_one(); } /// wait for processes for termination for (size_t i = 0;i < chunks.size();++i) { chunks[i]->t.join(); } /// combine data from different chunks for (size_t i = 1;i < chunks.size();++i) { chunks[i]->cuda_hash_table->mergeToOtherTable(chunks[0]->cuda_hash_table, thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[0]->stream); aggregate_function->cudaMergeBulk(chunks[0]->group_agg_res->getData(), chunks[i]->cuda_hash_table->getBucketsNum(), chunks[i]->group_agg_res->getData(), thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[0]->stream); } CUDA_SAFE_CALL( cudaMemcpyAsync ( chunks[0]->host_buffer_agg_res_keys->getBuf(), chunks[0]->cuda_hash_table->getStrBuf(), chunks[0]->cuda_hash_table->getStrBufSz(), cudaMemcpyDeviceToHost, chunks[0]->stream ) ); /// TODO get rid of sizeof(UInt32) in all following cudaMemcpyAsync!! 
CUDA_SAFE_CALL( cudaMemcpyAsync ( chunks[0]->host_buffer_agg_res_keys->getLens(), chunks[0]->cuda_hash_table->getLens(), chunks[0]->cuda_hash_table->getBucketsNum()*sizeof(UInt32), cudaMemcpyDeviceToHost, chunks[0]->stream ) ); CUDA_SAFE_CALL( cudaMemcpyAsync ( chunks[0]->host_buffer_agg_res_keys->getOffsets(), chunks[0]->cuda_hash_table->getOffsets(), chunks[0]->cuda_hash_table->getBucketsNum()*sizeof(UInt32), cudaMemcpyDeviceToHost, chunks[0]->stream ) ); CUDA_SAFE_CALL( cudaMemcpyAsync ( chunks[0]->host_group_agg_res->getData(), chunks[0]->group_agg_res->getData(), chunks[0]->cuda_hash_table->getBucketsNum()*aggregate_function->cudaSizeOfData(), cudaMemcpyDeviceToHost, chunks[0]->stream ) ); CUDA_SAFE_CALL( cudaStreamSynchronize ( chunks[0]->stream ) ); auto host_e1 = steady_clock::now(); for (size_t j = 0;j < chunks[0]->cuda_hash_table->getBucketsNum();++j) { UInt32 len = chunks[0]->host_buffer_agg_res_keys->getLens()[j], offset = chunks[0]->host_buffer_agg_res_keys->getOffsets()[j]; if (len == DBMS_CUDA_EMPTY_LEN_VAL) continue; std::string key_str(chunks[0]->host_buffer_agg_res_keys->getBuf() + offset, len-1); CudaAggregateDataPtr res = chunks[0]->host_group_agg_res->getData() + j*aggregate_function->cudaSizeOfData(); auto it = chunks[0]->agg_result.find(key_str); if (it == chunks[0]->agg_result.end()) { chunks[0]->agg_result[key_str] = res; } else { throw std::logic_error("CudaStringsAggregator::waitProcessed: seems there are duplicates in GPU table"); } } auto host_e2 = steady_clock::now(); auto host_t = duration_cast<milliseconds>(host_e2 - host_e1); std::cout << "CudaStringsAggregator::waitProcessed: time for placing data into cpu hash table " << host_t.count() << "ms" << std::endl; } bool CudaStringsAggregator::tryQueueData(size_t str_num, size_t str_buf_sz, const char *str_buf, const OffsetType *offsets, size_t vals_str_buf_sz, const char *vals_str_buf, const OffsetType *vals_offsets) { std::unique_lock<std::mutex> lck( chunks[curr_filling_chunk]->cuda_buffer_mtx ); chunks[curr_filling_chunk]->cv_cuda_processing_end.wait( lck, [this]{return !chunks[curr_filling_chunk]->cuda_processing_state;} ); if (chunks[curr_filling_chunk]->cuda_buffer_keys->hasSpace(str_num, str_buf_sz) && chunks[curr_filling_chunk]->cuda_buffer_vals->hasSpace(str_num, vals_str_buf_sz) ) { chunks[curr_filling_chunk]->cuda_buffer_keys->addData(str_num, str_buf_sz, str_buf, offsets, copy_stream); if (is_vals_needed) chunks[curr_filling_chunk]->cuda_buffer_vals->addData(str_num, vals_str_buf_sz, vals_str_buf, vals_offsets, copy_stream); return true; } else { if (chunks[curr_filling_chunk]->cuda_buffer_keys->empty()) throw std::runtime_error("CudaStringsAggregator: seems there is not enough space in buffer"); waitQueueData(); chunks[curr_filling_chunk]->cuda_processing_state = true; chunks[curr_filling_chunk]->cv_buffer_append_end.notify_one(); curr_filling_chunk = (curr_filling_chunk+1)%chunks.size(); return false; } } __global__ void kerFillMaxHash( UInt32 str_num, UInt32 max_str_num, UInt64 *hashes ) { UInt32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < str_num) return; if (!(i < max_str_num)) return; hashes[i] = 0xFFFFFFFFFFFFFFFF; } void CudaStringsAggregator::processChunk(size_t i) { CUDA_SAFE_CALL(cudaSetDevice(dev_number)); chunks[i]->cuda_hash_table->erase(chunks[i]->stream); aggregate_function->cudaInitAggregateData(chunks[i]->cuda_hash_table->getBucketsNum(), chunks[i]->group_agg_res->getData(), chunks[i]->stream); while (1) { { std::cout << "CudaStringsAggregator::processChunk(i = " << i << "): 
waiting data..." << std::endl; std::unique_lock<std::mutex> lck( chunks[i]->cuda_buffer_mtx ); chunks[i]->cv_buffer_append_end.wait( lck, [this,i]{return chunks[i]->cuda_processing_state;} ); /// we agreed that empty buffer means end of processing if (chunks[i]->cuda_buffer_keys->empty()) break; std::cout << "CudaStringsAggregator::processChunk(i = " << i << "): calc Lengths" << std::endl; chunks[i]->cuda_buffer_keys->calcLengths( chunks[i]->stream ); if (is_vals_needed) chunks[i]->cuda_buffer_vals->calcLengths( chunks[i]->stream ); size_t str_num = chunks[i]->cuda_buffer_keys->getStrNum(); chunks[i]->cuda_hash_table->addData(str_num, chunks[i]->cuda_buffer_keys->getBuf(), chunks[i]->cuda_buffer_keys->getOffsets(), chunks[i]->cuda_buffer_keys->getLens(), thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[i]->stream); aggregate_function->cudaAddBulk(chunks[i]->group_agg_res->getData(), chunks[i]->cuda_buffer_vals, str_num, thrust::raw_pointer_cast(chunks[i]->group_nums.data()), chunks[i]->agg_tmp_buf->getData(), chunks[i]->stream); chunks[i]->cuda_buffer_keys->reset(); chunks[i]->cuda_buffer_vals->reset(); chunks[i]->cuda_processing_state = false; chunks[i]->cv_cuda_processing_end.notify_one(); } } chunks[i]->cuda_hash_table->calcOffsets(chunks[i]->stream); CUDA_SAFE_CALL( cudaStreamSynchronize ( chunks[i]->stream ) ); } CudaStringsAggregator::~CudaStringsAggregator() { } }
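The aggregator code above stages results in host buffers, copies them back with cudaMemcpyAsync on a per-chunk stream, and only reads them after cudaStreamSynchronize on that stream. The following is a minimal, self-contained sketch of that copy pattern only (it is not part of the original file; the buffer names, sizes, and the memset stand-in for the aggregation kernels are hypothetical):

// Sketch: pinned host buffer + async device-to-host copy on a dedicated stream.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const size_t n = 1024;
    float *d_res = nullptr, *h_res = nullptr;
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaMalloc(&d_res, n * sizeof(float));
    cudaMallocHost(&h_res, n * sizeof(float));            // pinned memory so the async copy can overlap host work
    cudaMemsetAsync(d_res, 0, n * sizeof(float), stream);  // stands in for the GPU aggregation work
    cudaMemcpyAsync(h_res, d_res, n * sizeof(float), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);                          // host may read h_res only after this returns
    printf("h_res[0] = %f\n", h_res[0]);
    cudaFreeHost(h_res);
    cudaFree(d_res);
    cudaStreamDestroy(stream);
    return 0;
}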
4f06112d08711738de05b4c8402563706cfe6841.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ConvoyTracker.cu * * Created on: 06.06.2016 * Author: Sebastian Reinhart * * FOR DETAILED COMMENTS SEE CONVOY_TRACKER_ZERO_COPY */ #include "ConvoyTracker.cuh" #include <assert.h> ConvoyTracker::ConvoyTracker() { currentSpeed = 0; currentYawRate = 0; x = 0; y = 0; yaw = 0; xOld = 0; yOld = 0; yawOld = 0; ID = 0; convoyID = 0; currentHistoryOnDevice = false; currentConvoyOnDevice = false; convoySize = 0; startIndexConvoys = 0; endIndexConvoys = 0; historySize = 0; startIndexHistory = 0; endIndexHistory = 0; convoyCheckSize = 0; intervalSize = 0; hipError_t error; hipStreamCreate(&stream2); hipStreamCreate(&stream3); hipStreamCreate(&stream4); hipStreamCreate(&stream5); error = hipHostMalloc((void**) &history, NUM_HIST*sizeof(History), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_history, NUM_HIST*sizeof(History)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_historyMatch, MAX_SEGMENTS*sizeof(int), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_historyMatch, MAX_SEGMENTS*sizeof(int)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_historyMatchSelf, sizeof(int), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_historyMatchSelf, sizeof(int)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_intervalMap, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_intervalMap, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_convoyCheck, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_newVeh, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_IDincluded, NUM_HIST*2*sizeof(int), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_IDincluded, NUM_HIST*2*sizeof(int)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_duplicate, NUM_HIST*sizeof(bool), hipHostMallocDefault); if (error != hipSuccess) { 
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_duplicate, NUM_HIST*sizeof(bool)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_updateData, MAX_SEGMENTS*3*sizeof(float), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_updataData, MAX_SEGMENTS*3*sizeof(float)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_intvlIndex, MAX_SEGMENTS*sizeof(int), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_intvlIndex, MAX_SEGMENTS*sizeof(int)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_vehicles, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_vehicles, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &h_distance, MAX_SEGMENTS*MAX_SEGMENTS*sizeof(float), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_distance, MAX_SEGMENTS*MAX_SEGMENTS*sizeof(float)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } size_t sizeConv = NUM_CONV; sizeConv *= sizeof(Convoy); error = hipHostMalloc((void **) &convoys, sizeConv, hipHostMallocDefault); if(error != hipSuccess) { printf( "hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **) &d_convoys, NUM_CONV*sizeof(Convoy)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipHostMalloc((void**) &xSubInterval, sizeof(float), hipHostMallocMapped); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } *xSubInterval = 0; error = hipHostGetDevicePointer(&d_subIntvl_ptr, xSubInterval, 0); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } } ConvoyTracker::~ConvoyTracker() { hipHostFree(xSubInterval); hipHostFree(convoys); hipHostFree(history); hipHostFree(h_historyMatch); hipHostFree(h_convoyCheck); hipHostFree(h_intervalMap); hipHostFree(h_historyMatchSelf); hipHostFree(h_IDincluded); hipHostFree(h_vehicles); hipHostFree(h_distance); hipHostFree(h_duplicate); hipHostFree(h_intvlIndex); hipFree(d_convoys); hipFree(d_history); hipFree(d_historyMatch); hipFree(d_newVeh); hipFree(d_intervalMap); hipFree(d_historyMatchSelf); hipFree(d_IDincluded); 
hipFree(d_vehicles); hipFree(d_distance); hipFree(d_duplicate); hipFree(d_intvlIndex); hipStreamDestroy(stream2); hipStreamDestroy(stream3); hipStreamDestroy(stream4); hipStreamDestroy(stream5); } std::string getNextMeasureAsString(int i) { std::ostringstream number; if(i<10) { number << "000" << i; } else if(i<100) { number << "00" << i; } else if(i<1000) { number << "0" << i; } else { number << i; } return number.str(); } __device__ void shiftRotateHistory(History* d_pc, float x, float y, float theta, int index) { //update history if(((index < d_pc->endIndex) && (d_pc->endIndex > d_pc->startIndex)) || ((d_pc->endIndex < d_pc->startIndex) && (index != d_pc->endIndex))) { d_pc->tracks[index].subIntvl += x; int numIntervals = (int) ((d_pc->tracks[index].subIntvl) / INTERVALL_LENGTH); d_pc->tracks[index].x -= numIntervals; d_pc->tracks[index].subIntvl -= numIntervals; float angleInRadians = theta*((float)M_PI)/180.0f; float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; //update history d_pc->tracks[index].y -= y; d_pc->tracks[index].theta -= angleInRadians; float xAbs = d_pc->tracks[index].x; float yAbs = d_pc->tracks[index].y; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; d_pc->tracks[index].y -= yAbs; d_pc->tracks[index].subIntvl -= xAbs; } } __device__ void shiftRotateConvoy(Convoy* d_eml, float x, float y, float theta, int index) { if(((index < d_eml->endIndexTracks) && (d_eml->endIndexTracks > d_eml->startIndexTracks)) || ((d_eml->endIndexTracks < d_eml->startIndexTracks) && (index != d_eml->endIndexTracks))) { d_eml->tracks[index].subIntvl += x; int numIntervals = (int) ((d_eml->tracks[index].subIntvl) / INTERVALL_LENGTH); d_eml->tracks[index].x -= numIntervals; d_eml->tracks[index].subIntvl -= numIntervals; float angleInRadians = theta*((float)M_PI)/180.0f; float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; d_eml->tracks[index].y -= y; d_eml->tracks[index].theta -= angleInRadians; float xAbs = d_eml->tracks[index].x; float yAbs = d_eml->tracks[index].y; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; d_eml->tracks[index].y -= yAbs; d_eml->tracks[index].subIntvl -= xAbs; } } __device__ void computeIntervalMap(PointCellDevice* d_interval, float xMotion, float yMotion, float angle, float* xSubInterval) { float angleInRadians = angle * ((float)M_PI) / 180.0f; *xSubInterval += xMotion; int numIntervals = (int) (*xSubInterval / INTERVALL_LENGTH); *xSubInterval -= numIntervals; for (int i = 0; i < numIntervals; i++) { float x = d_interval->getX(); int interval = floor(x) + CARINTERVAL; if(interval == 0) { //delete content d_interval->setX(-10000); continue; } d_interval->setX(floor(x) - 0.5f); } int interval = floor(d_interval->getX()); //1.Step correct directions of stored PCs d_interval->setY(d_interval->getY() - yMotion); d_interval->setTheta(d_interval->getTheta() - angleInRadians); //2. 
compensate rotation float xAbs = ( interval - CARINTERVAL + 0.5f) * INTERVALL_LENGTH - *xSubInterval; float yAbs = d_interval->getY(); float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; d_interval->setY(d_interval->getY() - yAbs); if(xAbs > 0.5f*INTERVALL_LENGTH) { d_interval->setX(interval + 1.5f); } else if(xAbs < -0.5f*INTERVALL_LENGTH) { d_interval->setX(interval - 0.5f); } else { d_interval->subInvtl -= xAbs; } } __device__ bool findHistoryMatch(PointCellDevice* trackedVehicles, History* d_history, int index) { bool result = (d_history->ID != trackedVehicles->getID()); result = (result && (d_history->tracks[index].x - 0.5f <= trackedVehicles->getX())); result = (result && (trackedVehicles->getX() <= d_history->tracks[index].x + 0.5f)); result = (result && (d_history->tracks[index].y - 1.0f <= trackedVehicles->getY())); result = (result && (trackedVehicles->getY() <= d_history->tracks[index].y + 1.0f)); return result; } __device__ bool findHistoryMatchSelf(History* d_history, int index) { bool result = true; result = (result && (d_history->tracks[index].x - 0.5f <= 0.0f)); result = (result && (0 <= d_history->tracks[index].x + 0.5f)); result = (result && (d_history->tracks[index].y - 1.0f <= 0.0f)); result = (result && (0 <= d_history->tracks[index].y + 1.0f)); return result; } __device__ void predictDevice(PointCellDevice* vehicle, int index) { int state = index%5; //row int i = index / 5 ; //column int j = state; vehicle->data[state+5] = vehicle->data[state]; __syncthreads(); vehicle->computeF(); vehicle->computeCovarianceF(); float tmp = 0; //Tmp = F*P for(int k=0; k<5; k++) { tmp += vehicle->getF(i,k)*vehicle->getP(k,j); } vehicle->writeTmp(i,j, tmp); __syncthreads(); //P = tmp*F_t tmp = 0; for(int k=0; k<5; k++) { tmp += vehicle->getTmp(i,k)*vehicle->getF(j,k); } vehicle->writeP(i,j, tmp); __syncthreads(); //P = P+Q tmp = vehicle->getP(i,j) + vehicle->getQ(i,j); vehicle->writeP(i,j, tmp); } __global__ void compensateEgoMotionMap(PointCellDevice* d_interval, float* d_subIntvl, float x, float y, float angle) { computeIntervalMap(&(d_interval[threadIdx.x]), x, y, angle, d_subIntvl); } __global__ void compensateEgoMotionHistory(History* d_history, float x, float y, float angle) { shiftRotateHistory(&(d_history[blockIdx.x]), x, y, angle, threadIdx.x); } __global__ void compensateEgoMotionConvoy(Convoy* d_convoy, float x, float y, float angle) { shiftRotateConvoy(&(d_convoy[blockIdx.x]), x, y, angle, threadIdx.x); } __global__ void findConvoyDevice(PointCellDevice* trackedVehicles, History* d_history, int* d_historyMatch) { if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex))) { if(findHistoryMatch(&(trackedVehicles[blockIdx.y]),&(d_history[blockIdx.x]),threadIdx.x)) { atomicMin(&(d_historyMatch[blockIdx.y]), d_history[blockIdx.x].ID); } } } __global__ void findConvoyDeviceSelf(History* d_history, int* d_historyMatchSelf) { if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex))) { 
if(findHistoryMatchSelf(&(d_history[blockIdx.x]),threadIdx.x)) { atomicMin(d_historyMatchSelf, d_history[blockIdx.x].ID); } } } __global__ void memSetHistoryMatch(int* d_historyMatch) { d_historyMatch[threadIdx.x] = INT_MAX; } /* * Run Kalman-Filter Predict on Device with #vehicles as Blocks and 25 Threads per Block */ __global__ void predict(PointCellDevice* d_interval) { predictDevice(&(d_interval[blockIdx.x]), threadIdx.x); } /* * Run Kalman-Filter Update on Device with 25 Threads */ __device__ void updateDevice(PointCellDevice* d_interval, int index, float velocity, float phi, float xNew, float yNew, float thetaNew) { //row int i = index / 5; //column int j = index % 5; float tmp = 0; //tmp = H*P for(int k=0; k<5; k++) { tmp += d_interval->getH(i,k)*d_interval->getP(k,j); } d_interval->writeTmp(i,j, tmp); __syncthreads(); //S = tmp*H_t tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getTmp(i,k)*d_interval->getH(j,k); } d_interval->writeS(i,j, tmp); __syncthreads(); //S = S+R tmp = d_interval->getS(i,j) + d_interval->getR(i,j); d_interval->writeS(i,j, tmp); __syncthreads(); //tmp = P*H_t tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getP(i,k)*d_interval->getH(j,k); } d_interval->writeTmp(i,j, tmp); __syncthreads(); //invertS if(threadIdx.x == 0) { d_interval->invertS(); } __syncthreads(); //K = tmp*S_i tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getTmp(i,k)*d_interval->getS(k,j); } d_interval->writeK(i,j, tmp); __syncthreads(); //tmp = K*(newState-stateVector) tmp = 0; tmp += d_interval->getK(i,0)*(xNew-d_interval->getX()); tmp += d_interval->getK(i,1)*(yNew-d_interval->getY()); tmp += d_interval->getK(i,2)*(thetaNew-d_interval->getTheta()); tmp += d_interval->getK(i,3)*(velocity-d_interval->getVelocity()); tmp += d_interval->getK(i,4)*(phi-d_interval->getPhi()); d_interval->writeTmp(i,j, tmp); __syncthreads(); //stateVector = stateVector + tmp if(threadIdx.x == 0) { d_interval->setX(d_interval->getX() + d_interval->getTmp(0,0)); d_interval->setY(d_interval->getY() + d_interval->getTmp(1,0)); d_interval->setTheta(d_interval->getTheta() + d_interval->getTmp(2,0)); d_interval->setVelocity(d_interval->getVelocity() + d_interval->getTmp(3,0)); d_interval->setPhi(d_interval->getPhi() + d_interval->getTmp(4,0)); } __syncthreads(); //tmp = K*H tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getK(i,k)*d_interval->getH(k,j); } d_interval->writeTmp(i,j, tmp); __syncthreads(); //tmp = I- tmp tmp = d_interval->getI(i,j) - d_interval->getTmp(i,j); d_interval->writeTmp(i,j, tmp); __syncthreads(); //tmp2 = tmp*P tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getTmp(i,k)*d_interval->getP(k,j); } d_interval->writeTmp2(i,j, tmp); __syncthreads(); d_interval->writeP(i,j, d_interval->getTmp2(i,j)); } __global__ void updateOne(PointCellDevice* d_interval, int index, float velocity, float phi, float xNew, float yNew, float thetaNew) { updateDevice(d_interval,threadIdx.x,velocity, phi,xNew,yNew,thetaNew); } __global__ void updateKernel(PointCellDevice* d_intvl, float* d_updateData, int* d_intvlIndex) { int index = d_intvlIndex[blockIdx.x]; float xNew = d_updateData[blockIdx.x*3]; float yNew = d_updateData[blockIdx.x*3+1]; float thetaNew = d_updateData[blockIdx.x*3+2]; float x = d_intvl[index].data[5]; float y = d_intvl[index].data[6]; float theta = d_intvl[index].data[7]; float velocity = sqrtf((xNew - x) * (xNew - x) + (yNew - y)*(yNew - y)) / TIMESTAMP; float phi = (thetaNew-theta) / TIMESTAMP; if(threadIdx.x == 0) { d_intvl[index].setVelocity(velocity); 
d_intvl[index].setPhi(phi); } updateDevice(&(d_intvl[index]),threadIdx.x,velocity, phi,xNew,yNew,thetaNew); } __global__ void findIDInConvoyDevice(Convoy* d_convoy, int* d_IDIncluded, int id1, int id2) { if(((threadIdx.x < d_convoy[blockIdx.x].endIndexID) && (d_convoy[blockIdx.x].endIndexID > d_convoy[blockIdx.x].startIndexID)) || ((d_convoy[blockIdx.x].endIndexID < d_convoy[blockIdx.x].startIndexID) && (threadIdx.x != d_convoy[blockIdx.x].endIndexID))) { int index = blockIdx.x*2; d_IDIncluded[index] = INT_MAX; d_IDIncluded[index+1] = INT_MAX; __syncthreads(); int result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id1); if(result) { atomicMin(&(d_IDIncluded[index]), threadIdx.x); } result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id2); if(result) { atomicMin(&(d_IDIncluded[index+1]), threadIdx.x); } result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == -1); if(result) { atomicMin(&(d_IDIncluded[index+1]), INT_MIN); atomicMin(&(d_IDIncluded[index]), INT_MIN); } } } __global__ void checkConvoyForDuplicateDevice(Convoy* d_convoy, PointCellDevice* d_vehicle, bool* d_duplicate) { if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks))) { d_duplicate[blockIdx.x] = true; __syncthreads(); bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != (floor(d_vehicle->getX())+0.5f)); if(!result) { d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result; } } } __global__ void checkHistoryForDuplicateDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate) { if(((threadIdx.x < d_history[d_IDincluded[blockIdx.x]].endIndex) && (d_history[d_IDincluded[blockIdx.x]].endIndex > d_history[d_IDincluded[blockIdx.x]].startIndex)) || ((d_history[d_IDincluded[blockIdx.x]].endIndex < d_history[d_IDincluded[blockIdx.x]].startIndex) && (threadIdx.x != d_history[d_IDincluded[blockIdx.x]].endIndex))) { d_duplicate[blockIdx.x] = true; int index = d_intvlIndex[blockIdx.x]; int intvl = floor(d_intvl[index].getX()); intvl += 0.5f; if(d_history[d_IDincluded[blockIdx.x]].tracks[threadIdx.x].x == intvl) { d_duplicate[blockIdx.x] = false; } } } __global__ void checkConvoyForDuplicateDeviceSelf(Convoy* d_convoy, bool* d_duplicate) { if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks))) { d_duplicate[blockIdx.x] = true; bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != 0.5f); if(!result) { d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result; } } } __global__ void findHistoryWithIDDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded) { int index = d_intvlIndex[threadIdx.x]; int ID = d_intvl[index].getID(); if(d_history[blockIdx.x].ID == ID) { d_IDincluded[threadIdx.x] = blockIdx.x; } } __global__ void addUpdatedPositionToHistoryDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate) { int intvl = floor(d_intvl[d_intvlIndex[threadIdx.x]].getX()); d_intvl[d_intvlIndex[threadIdx.x]].setX(intvl+ 0.5f); int historyIndex = d_IDincluded[threadIdx.x]; if(d_duplicate[threadIdx.x]) { 
int index = d_history[historyIndex].endIndex; d_history[historyIndex].tracks[index].subIntvl = 0.5f; d_history[historyIndex].tracks[index].x = d_intvl[d_intvlIndex[threadIdx.x]].getX(); d_history[historyIndex].tracks[index].y = d_intvl[d_intvlIndex[threadIdx.x]].getY(); d_history[historyIndex].tracks[index].theta = d_intvl[d_intvlIndex[threadIdx.x]].getTheta(); index = (index+1)%MAX_LENGTH_HIST_CONV; d_history[historyIndex].endIndex = index; if(index == d_history[historyIndex].startIndex) { d_history[historyIndex].startIndex = (d_history[historyIndex].startIndex+1)%NUM_HIST; } } } __global__ void computeDistancesDevice(PointCellDevice* d_vehicles, PointCellDevice* d_intvl, float* d_distance) { int index = blockIdx.x*MAX_SEGMENTS + threadIdx.x; float x = d_vehicles[blockIdx.x].getX(); float y = d_vehicles[blockIdx.x].getY(); float theta = d_vehicles[blockIdx.x].getTheta(); float x1 = d_intvl[threadIdx.x].getX(); float y1 = d_intvl[threadIdx.x].getY(); float theta1 = d_intvl[threadIdx.x].getTheta(); d_distance[index] = sqrtf((x - x1)*(x - x1) + (y - y1)*(y - y1) + (theta - theta1)*(theta - theta1)); } int main() { #ifdef CREATE_MEASURES PGMReader pgmReader; double speed = 4.0/3.0; for(int i=0; i<NUM_MEASUREMENT; i++) { std::string number = getNextMeasureAsString(i); pgmReader.simulateLaserRays(number); /* std::ofstream EMLMeasureFile; std::ostringstream measurePath; measurePath << "./Laserdata/EML" << number << ".txt"; EMLMeasureFile.open (measurePath.str().c_str()); EMLMeasureFile << ((double)i)*speed << " 0 0 120 0" << std::endl; EMLMeasureFile.close();*/ } #endif int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (deviceProp.canMapHostMemory != 1){ fprintf(stderr, "Device cannot map memory!\n"); return 1; } if (error != hipSuccess) { printf( "hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } else { #ifdef PRINT printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); #endif } hipSetDeviceFlags(hipDeviceMapHost); hipEvent_t startEvent, stopEvent, start2Event, stop2Event; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); hipEventCreate(&start2Event); hipEventCreate(&stop2Event); float time = 0; hipEventRecord(startEvent, 0); ConvoyTracker tracker; hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); hipEventElapsedTime(&time,startEvent,stopEvent); #if SZENARIO == 6 std::vector<PointCellDevice> vehiclesSim; error = hipHostMalloc((void**) &tracker.h_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocDefault); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipMalloc((void **)&tracker.d_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } for(uint i=0;i <20; i++) { tracker.h_vehicleSim[i].initializeMemory(); if(i%2 == 0) { tracker.h_vehicleSim[i].setY(-3); 
tracker.h_vehicleSim[i].setVelocity(38.9); } else { tracker.h_vehicleSim[i].setY(3); tracker.h_vehicleSim[i].setVelocity(27.8); } tracker.h_vehicleSim[i].setX((i/2)*8); tracker.h_vehicleSim[i].setTheta(0); tracker.h_vehicleSim[i].setPhi(0); } hipMemcpy(tracker.d_vehicleSim, tracker.h_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice), hipMemcpyHostToDevice); #endif hipEventRecord(startEvent, 0); int vehicleCount = 0; float compensateHistory[NUM_MEASUREMENT]; for(int i=0; i<NUM_MEASUREMENT; i++) { hipEventRecord(start2Event, 0); std::vector<PointCellDevice*> trackedVehicles; std::string number = getNextMeasureAsString(i); tracker.readEMLData(number); //1. Compensate own vehicle motion double deltaX = tracker.getX() - tracker.getXOld(); double deltaY = tracker.getY() - tracker.getYOld(); double deltaYaw = tracker.getYaw() - tracker.getYawOld(); float angleInRadians = ((float)deltaYaw) * ((float)M_PI) / 180.0f; float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; tracker.transformDataToDevice(); if(tracker.historySize > 0) { hipLaunchKernelGGL(( compensateEgoMotionHistory), dim3(tracker.historySize), dim3(MAX_LENGTH_HIST_CONV),0, tracker.stream2, tracker.d_history, deltaX, deltaY, deltaYaw); } vehicleCount = tracker.reader.processLaserData(number,tracker.getCurrentSpeed(), tracker.getCurrentYawRate(), tracker.h_vehicles); if(tracker.convoySize > 0) { hipLaunchKernelGGL(( compensateEgoMotionConvoy), dim3(tracker.convoySize), dim3(MAX_LENGTH_HIST_CONV),0, tracker.stream3, tracker.d_convoys, deltaX, deltaY, deltaYaw); } if(tracker.intervalSize > 0) { hipLaunchKernelGGL(( compensateEgoMotionMap), dim3(1),dim3(tracker.intervalSize),0,tracker.stream4, tracker.d_intervalMap, tracker.d_subIntvl_ptr, deltaX, deltaY, deltaYaw); hipLaunchKernelGGL(( predict), dim3(tracker.intervalSize),dim3(25),0,tracker.stream4, tracker.d_intervalMap); } tracker.transformDataFromDevice(); for(uint k = 0; k < tracker.convoySize; k++) { tracker.convoys[k].highestValue.subIntvl += ((float)deltaX); int numIntervals = (int) ((tracker.convoys[k].highestValue.subIntvl) / INTERVALL_LENGTH); tracker.convoys[k].highestValue.x -= numIntervals; tracker.convoys[k].highestValue.subIntvl -= numIntervals; tracker.convoys[k].highestValue.y -= deltaY; tracker.convoys[k].highestValue.theta -= angleInRadians; float xAbs = tracker.convoys[k].highestValue.x; float yAbs = tracker.convoys[k].highestValue.y; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; tracker.convoys[k].highestValue.y -= yAbs; tracker.convoys[k].highestValue.subIntvl -= xAbs; } for(uint j=0; j<tracker.intervalSize;j++) { trackedVehicles.push_back(&(tracker.h_intervalMap[j])); } //3. 
Associate and Update #if SZENARIO == 6 tracker.associateAndUpdate(20, trackedVehicles); #else tracker.associateAndUpdate(vehicleCount, trackedVehicles); #endif hipEventRecord(stop2Event, 0); hipEventSynchronize(stop2Event); float time3; hipEventElapsedTime(&time3, start2Event, stop2Event); compensateHistory[i] = time3; } hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); float sumH = 0; for(int i = 0; i< NUM_MEASUREMENT; i++) { sumH += compensateHistory[i]; } sumH /= NUM_MEASUREMENT; #ifdef PRINT std::cout << "Duration of compensate History: " << sumH << std::endl; #endif float time2; hipEventElapsedTime(&time2, startEvent, stopEvent); #ifdef PRINT std::cout << "Overall Time: " << time +time2<< std::endl; #else std::cout << time + time2 << std::endl; #endif #if SZENARIO == 6 hipHostFree(tracker.h_vehicleSim); #endif tracker.visualizeConvoys(); tracker.visualizeHistory(); return 0; } /** * Stores current Speed and yaw rate from file to class variables */ void ConvoyTracker::readEMLData(std::string number) { std::ostringstream measurePath; measurePath << EMLPATH << number << ".txt"; #ifdef PRINT std::cout << measurePath.str() << std::endl; #endif std::ifstream input(measurePath.str().c_str()); std::string line; std::string segment; if(std::getline( input, line )) { std::stringstream ss; ss << line; int dataCnt = 1; while(std::getline(ss, segment, ' ')) { switch (dataCnt) { case 1: { //x in m xOld = x; x = atof(segment.c_str()); ++dataCnt; break; } case 2: { //y in m yOld = y; y = atof(segment.c_str()); ++dataCnt; break; } case 3: { //yaw in yawOld = yaw; yaw = atof(segment.c_str()); ++dataCnt; break; } case 4: { //velocity in kmh //Compute value in m/s currentSpeed = atof(segment.c_str()) / 3.6; ++dataCnt; break; } case 5: { //yaw rate in /s //Compute value in rad/s currentYawRate = atof(segment.c_str()) * M_PI / 180.0; break; } } } EMLPos curPos; curPos.x = x; curPos.y = y; curPos.theta = yaw; EML.push_back(curPos); } } double ConvoyTracker::getCurrentSpeed() const { return currentSpeed; } void ConvoyTracker::setCurrentSpeed(double currentSpeed) { this->currentSpeed = currentSpeed; } double ConvoyTracker::getCurrentYawRate() const { return currentYawRate; } void ConvoyTracker::setCurrentYawRate(double currentYawRate) { this->currentYawRate = currentYawRate; } double ConvoyTracker::getX() const { return x; } void ConvoyTracker::setX(double x) { this->x = x; } double ConvoyTracker::getXOld() const { return xOld; } void ConvoyTracker::setXOld(double old) { this->xOld = old; } double ConvoyTracker::getY() const { return y; } void ConvoyTracker::setY(double y) { this->y = y; } double ConvoyTracker::getYaw() const { return yaw; } void ConvoyTracker::setYaw(double yaw) { this->yaw = yaw; } double ConvoyTracker::getYawOld() const { return yawOld; } void ConvoyTracker::setYawOld(double yawOld) { this->yawOld = yawOld; } double ConvoyTracker::getYOld() const { return yOld; } void ConvoyTracker::setYOld(double old) { yOld = old; } /** * Searches for corresponding vehicles using Global Nearest Neighbor algorithm and updates the results */ void ConvoyTracker::associateAndUpdate(int vehicleCount, std::vector<PointCellDevice*> trackedVehicles) { //initialize all IDs in possible history to -1 to have no false detection in findConvoy hipLaunchKernelGGL(( memSetHistoryMatch), dim3(1),dim3(MAX_SEGMENTS),0,stream2, d_historyMatch); convoyCheckSize = 0; int updateCounter = 0; int indexCounter = trackedVehicles.size(); std::vector<int> indicesToAdd; std::vector<PointCellDevice*> updateCheck; for(uint 
i = 0; i<vehicleCount; i++) { #if SZENARIO == 6 double x = h_vehicleSim[i].getX(); double y = h_vehicleSim[i].getY(); double theta = h_vehicleSim[i].getTheta(); #else double x = h_vehicles[i].getX(); double y = h_vehicles[i].getY(); double theta = h_vehicles[i].getTheta(); #endif double minDist = INT_MAX; int minIndex = INT_MAX; #ifdef PRINT std::cout << "X: " << x << " Y: " << y << " Theta: " << theta <<std::endl; #endif for(uint j = 0; j<trackedVehicles.size(); j++) { double x1 = trackedVehicles.at(j)->getX(); double y1 = trackedVehicles.at(j)->getY(); double theta1 = trackedVehicles.at(j)->getTheta(); #ifdef PRINT std::cout << "X1: " << x1 << " Y1: " << y1<< " Theta1: " << theta1 <<std::endl; #endif double dist = sqrt((x - x1)*(x - x1) + (y - y1)*(y - y1) + (theta - theta1)*(theta - theta1)); if(dist < minDist) { minDist = dist; minIndex = j; } } #ifdef PRINT std::cout << "Min distance: " << minDist << std::endl; #endif if(minDist > ASSOCIATION_THRESHOLD) { //do not associate vehicles with to big distance in between //create new track instead ++ID; #if SZENARIO == 6 h_vehicleSim[i].setID(ID); #else h_vehicles[i].setID(ID); #endif indicesToAdd.push_back(i); history[endIndexHistory].ID = ID; history[endIndexHistory].tracks[0].subIntvl = 0.5f; #if SZENARIO == 6 history[endIndexHistory].tracks[0].x = h_vehicleSim[i].getX(); history[endIndexHistory].tracks[0].y = h_vehicleSim[i].getY(); history[endIndexHistory].tracks[0].theta = h_vehicleSim[i].getTheta(); #else history[endIndexHistory].tracks[0].x = h_vehicles[i].getX(); history[endIndexHistory].tracks[0].y = h_vehicles[i].getY(); history[endIndexHistory].tracks[0].theta = h_vehicles[i].getTheta(); #endif history[endIndexHistory].startIndex = 0; history[endIndexHistory].endIndex = 1; int index = (endIndexHistory+1)%NUM_HIST; if(index == startIndexHistory) { startIndexHistory = (startIndexHistory+1)%NUM_HIST; } else { ++historySize; } endIndexHistory = index; #ifdef PRINT std::cout << "Added new Vehicle with ID " << ID << std::endl; #endif currentHistoryOnDevice = false; #if SZENARIO == 6 h_convoyCheck[convoyCheckSize] = h_vehicleSim[i]; #else h_convoyCheck[convoyCheckSize] = h_vehicles[i]; #endif ++convoyCheckSize; } else { //vehicle matched, update PointCellDevice* tmp = trackedVehicles.at(trackedVehicles.size() -1 ); PointCellDevice* update = trackedVehicles.at(minIndex); #ifdef PRINT std::cout << "Update ID " << update->getID() << std::endl; #endif trackedVehicles.at(minIndex) = tmp; h_intvlIndex[minIndex] = h_intvlIndex[trackedVehicles.size()-1]; h_intvlIndex[trackedVehicles.size()-1] = minIndex; #if SZENARIO == 6 h_updateData[updateCounter*3] = h_vehicleSim[i].getX(); h_updateData[updateCounter*3+1] = h_vehicleSim[i].getY(); h_updateData[updateCounter*3+2] = h_vehicleSim[i].getTheta(); #else h_updateData[updateCounter*3] = h_vehicles[i].getX(); h_updateData[updateCounter*3+1] = h_vehicles[i].getY(); h_updateData[updateCounter*3+2] = h_vehicles[i].getTheta(); #endif trackedVehicles.pop_back(); #ifdef PRINT std::cout << "Updated vehicle with ID " << update->getID() << std::endl; #endif updateCheck.push_back(update); ++updateCounter; } } for(int i=0; i<updateCounter; i++) { for(int j=0; j<intervalSize; j++) { if(updateCheck[i] == &h_intervalMap[j]) { h_intvlIndex[i] = j; break; } } } //Update all matched vehicles if(updateCounter >0) { hipMemcpyAsync(d_history, history, historySize*sizeof(History), hipMemcpyHostToDevice, stream4); hipMemcpyAsync(d_intervalMap, h_intervalMap, intervalSize*sizeof(PointCellDevice), hipMemcpyHostToDevice, 
stream5); hipMemcpyAsync(d_updataData, h_updateData, updateCounter*3*sizeof(float), hipMemcpyHostToDevice, stream2); hipMemcpyAsync(d_intvlIndex, h_intvlIndex, updateCounter*sizeof(int), hipMemcpyHostToDevice, stream3); hipDeviceSynchronize(); hipLaunchKernelGGL(( updateKernel), dim3(updateCounter),dim3(25), 0, 0, d_intervalMap, d_updataData, d_intvlIndex); hipLaunchKernelGGL(( findHistoryWithIDDevice), dim3(historySize),dim3(updateCounter), 0, 0, d_history,d_intervalMap,d_intvlIndex,d_IDincluded); hipLaunchKernelGGL(( checkHistoryForDuplicateDevice), dim3(updateCounter), dim3(MAX_LENGTH_HIST_CONV), 0, 0, d_history,d_intervalMap,d_intvlIndex,d_IDincluded,d_duplicate); hipLaunchKernelGGL(( addUpdatedPositionToHistoryDevice), dim3(1),dim3(updateCounter), 0, 0, d_history, d_intervalMap,d_intvlIndex, d_IDincluded,d_duplicate); hipMemcpyAsync(h_intervalMap, d_intervalMap, intervalSize*sizeof(PointCellDevice), hipMemcpyDeviceToHost, stream3); hipMemcpyAsync(history, d_history, historySize*sizeof(History), hipMemcpyDeviceToHost, stream4); hipDeviceSynchronize(); for(int i=0; i<updateCounter;i++) { h_convoyCheck[convoyCheckSize] = h_intervalMap[h_intvlIndex[i]]; ++convoyCheckSize; } } //delete all tracks that could not be matched for(uint k = 0; k < trackedVehicles.size(); k++) { PointCellDevice* tmp = trackedVehicles.at(k); for(uint m = 0; m < intervalSize; m++) { if(tmp == &h_intervalMap[m]) { h_intervalMap[m] = h_intervalMap[--intervalSize]; break; } } } //add all observations that could not be matched for(uint k = 0; k < indicesToAdd.size(); k++) { if(intervalSize < MAX_SEGMENTS) { #if SZENARIO == 6 h_intervalMap[intervalSize++] = h_vehicleSim[indicesToAdd.at(k)]; #else h_intervalMap[intervalSize++] = h_vehicles[indicesToAdd.at(k)]; #endif } } //find Convoy if(historySize > 0 && convoyCheckSize >0) { hipMemcpyAsync(d_newVeh, h_convoyCheck, convoyCheckSize*sizeof(PointCellDevice), hipMemcpyHostToDevice,stream2); hipMemcpyAsync(d_history, history, historySize*sizeof(History), hipMemcpyHostToDevice, stream4); dim3 grid(historySize, convoyCheckSize); hipDeviceSynchronize(); hipLaunchKernelGGL(( findConvoyDevice), dim3(grid), dim3(MAX_LENGTH_HIST_CONV), 0, 0, d_newVeh,d_history,d_historyMatch); hipMemcpy(h_historyMatch, d_historyMatch, convoyCheckSize*sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); for(uint i=0; i<convoyCheckSize;i++) { if(h_historyMatch[i] != INT_MAX) { PointCellDevice vehicle = h_convoyCheck[i]; float x = vehicle.getX(); int interval = floor(x); int id1 = h_historyMatch[i]; int id2 = vehicle.getID(); #ifdef PRINT std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl; #endif bool convoyFound = false; if(convoySize >0) { hipMemcpy(d_convoys, convoys, convoySize*sizeof(Convoy), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( findIDInConvoyDevice), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream3, d_convoys, d_IDincluded,id1,id2); hipLaunchKernelGGL(( checkConvoyForDuplicateDevice), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream2, d_convoys, &(d_newVeh[i]),d_duplicate); hipMemcpyAsync(h_IDincluded, d_IDincluded, convoySize*2*sizeof(int), hipMemcpyDeviceToHost, stream3); hipMemcpyAsync(h_duplicate, d_duplicate, convoySize*sizeof(bool), hipMemcpyDeviceToHost, stream2); hipDeviceSynchronize(); } for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV) { Convoy currentConvoy = convoys[j]; int it1 = h_IDincluded[j*2]; int it2 = h_IDincluded[j*2+1]; if(it1 == INT_MIN || it2 == INT_MIN) { continue; } if(it1 != INT_MAX && 
it2 != INT_MAX) { //convoy already exists with both IDS //check if this x value is already contained if(h_duplicate[j]) { //x value is not contained int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY(); convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta(); convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5f > convoys[j].highestValue.x) { convoys[j].highestValue.x = interval+0.5f; convoys[j].highestValue.y = vehicle.getY(); convoys[j].highestValue.theta = vehicle.getTheta(); convoys[j].highestValue.subIntvl = 0.5f; } } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl; #endif break; } } if(convoyFound) { continue; } for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV) { Convoy currentConvoy = convoys[j]; int it1 = h_IDincluded[j*2]; int it2 = h_IDincluded[j*2+1]; if(it1 == INT_MIN || it2 == INT_MIN) { continue; } if (it1 != INT_MAX) { int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; //check if this x value is already contained if(h_duplicate[j]) { convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY(); convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta(); convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5 > convoys[j].highestValue.x) { convoys[j].highestValue.x = interval+0.5f; convoys[j].highestValue.y = vehicle.getY(); convoys[j].highestValue.theta = vehicle.getTheta(); convoys[j].highestValue.subIntvl = 0.5f; } } int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV; convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2; convoys[j].endIndexID = IDindex; if(IDindex == convoys[j].startIndexID) { convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV; } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl; #endif break; } else if (it2 != INT_MAX) { //only add position to convoy if it will be the highest value or the difference in y is not so big if(interval+0.5f < convoys[j].highestValue.x && !checkConvoyForY(vehicle.getY(),interval +0.5f,currentConvoy)) { continue; } int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; //check if this x value is already contained if(h_duplicate[j]) { convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY(); convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta(); convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = 
(convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5f > convoys[j].highestValue.x) { convoys[j].highestValue.x = interval+0.5f; convoys[j].highestValue.y = vehicle.getY(); convoys[j].highestValue.theta = vehicle.getTheta(); convoys[j].highestValue.subIntvl = 0.5f; } } int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV; convoys[j].participatingVehicles[currentConvoy.endIndexID] = id1; convoys[j].endIndexID = IDindex; if(IDindex == convoys[j].startIndexID) { convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV; } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl; #endif break; } } if(!convoyFound) { int cIndex = endIndexConvoys; convoys[cIndex].ID = convoyID++; convoys[cIndex].participatingVehicles[0] = id1; convoys[cIndex].participatingVehicles[1] = id2; convoys[cIndex].startIndexID = 0; convoys[cIndex].endIndexID = 2; convoys[cIndex].startIndexTracks = 0; convoys[cIndex].endIndexTracks = 1; convoys[cIndex].tracks[0].x = interval+0.5f; convoys[cIndex].tracks[0].y = vehicle.getY(); convoys[cIndex].tracks[0].theta = vehicle.getTheta(); convoys[cIndex].tracks[0].subIntvl = 0.5f; endIndexConvoys = (endIndexConvoys+1)%NUM_CONV; convoys[cIndex].highestValue.x = interval+0.5f; convoys[cIndex].highestValue.y = vehicle.getY(); convoys[cIndex].highestValue.theta = vehicle.getTheta(); convoys[cIndex].highestValue.subIntvl = 0.5f; if(convoySize == NUM_CONV) { startIndexConvoys = (startIndexConvoys+1)%NUM_CONV; } else { ++convoySize; } #ifdef PRINT std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl; #endif } currentConvoyOnDevice = false; } } } } void ConvoyTracker::visualizeConvoys() { visualizer.visualizeConvoys(EML, convoys, startIndexConvoys, endIndexConvoys); } void ConvoyTracker::visualizeHistory() { visualizer.visualizeHistory(EML, history, startIndexHistory, endIndexHistory); } void ConvoyTracker::transformDataToDevice() { hipMemcpyAsync(d_history, history, historySize*sizeof(History), hipMemcpyHostToDevice,stream2); hipMemcpyAsync(d_intervalMap, h_intervalMap, intervalSize*sizeof(PointCellDevice), hipMemcpyHostToDevice,stream4); *h_historyMatchSelf = INT_MAX; hipMemcpyAsync(d_historyMatchSelf, h_historyMatchSelf, sizeof(int), hipMemcpyHostToDevice,stream5); hipMemcpyAsync(d_convoys, convoys, convoySize*sizeof(Convoy), hipMemcpyHostToDevice,stream3); hipDeviceSynchronize(); } void ConvoyTracker::transformDataFromDevice() { hipDeviceSynchronize(); std::vector<int> toDelete; hipMemcpyAsync(history, d_history, historySize*sizeof(History), hipMemcpyDeviceToHost,stream2); hipMemcpyAsync(h_intervalMap, d_intervalMap, intervalSize*sizeof(PointCellDevice), hipMemcpyDeviceToHost,stream4); hipMemcpyAsync(convoys, d_convoys, convoySize*sizeof(Convoy), hipMemcpyDeviceToHost,stream3); hipDeviceSynchronize(); if(historySize > 0) { *h_historyMatchSelf = INT_MAX; hipLaunchKernelGGL(( findConvoyDeviceSelf), dim3(historySize), dim3(MAX_LENGTH_HIST_CONV), 0, 0, d_history, d_historyMatchSelf); hipMemcpy(h_historyMatchSelf, d_historyMatchSelf, sizeof(int), hipMemcpyDeviceToHost); if(*h_historyMatchSelf != INT_MAX) { findConvoySelf(*h_historyMatchSelf); } } int end; for (int i = startIndexHistory; i != endIndexHistory; i = (i+1)%NUM_HIST) { int endId = (history[i].endIndex-1)%MAX_LENGTH_HIST_CONV; if(endId <0) { endId = 
MAX_LENGTH_HIST_CONV-1; } if(history[i].tracks[endId].x < -5) { //if yes, mark history to delete #ifdef PRINT std::cout << "Delete history with ID " << history[i].ID << std::endl; #endif toDelete.push_back(i); } } if(toDelete.size() > 0) { for(int i=toDelete.size()-1; i>=0; i--) { end = (endIndexHistory-1)%NUM_HIST; if(end < 0) { end = NUM_HIST-1; } if(toDelete.at(i) != end) { history[toDelete.at(i)] = history[end]; } endIndexHistory = end; --historySize; } } toDelete.clear(); for (int i = startIndexConvoys; i != endIndexConvoys; i = (i + 1) % NUM_CONV) { end = (convoys[i].endIndexTracks-1) % MAX_LENGTH_HIST_CONV; if(end < 0) { end = MAX_LENGTH_HIST_CONV-1; } if(convoys[i].highestValue.x < -5) { #ifdef PRINT std::cout << "delete convoy with ID " << convoys[i].ID << std::endl; #endif toDelete.push_back(i); } } if(toDelete.size() > 0) { for(int i=toDelete.size()-1; i >=0; i--) { end = (endIndexConvoys-1) % NUM_CONV; if(end < 0) { end = NUM_CONV-1; } convoys[toDelete.at(i)] = convoys[end]; endIndexConvoys = end; --convoySize; } } toDelete.clear(); for(uint i=0; i<intervalSize;i++) { if(h_intervalMap[i].getX() < -100) { toDelete.push_back(i); } } if(toDelete.size() > 0) { for(int i=toDelete.size()-1; i>=0;i--) { h_intervalMap[toDelete.at(i)] = h_intervalMap[--intervalSize]; } } } void ConvoyTracker::findConvoySelf(int ID) { double x = 0; int interval = floor(x); int id1 = -1; int id2 = ID; #ifdef PRINT std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl; #endif bool convoyFound = false; if(convoySize >0) { hipMemcpy(d_convoys, convoys, convoySize*sizeof(Convoy), hipMemcpyHostToDevice); hipDeviceSynchronize(); hipLaunchKernelGGL(( findIDInConvoyDevice), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream3, d_convoys, d_IDincluded,id1,id2); hipLaunchKernelGGL(( checkConvoyForDuplicateDeviceSelf), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream2, d_convoys,d_duplicate); hipMemcpyAsync(h_IDincluded, d_IDincluded, convoySize*2*sizeof(int), hipMemcpyDeviceToHost, stream3); hipMemcpyAsync(h_duplicate, d_duplicate, convoySize*sizeof(bool), hipMemcpyDeviceToHost, stream2); hipDeviceSynchronize(); } for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV) { Convoy currentConvoy = convoys[j]; int it1 = h_IDincluded[j*2]; int it2 = h_IDincluded[j*2+1]; if(it1 != INT_MAX && it2 != INT_MAX) { //convoy already exists with both IDS //check if this x value is already contained if(h_duplicate[j]) { //x value is not contained int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = 0; convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0; convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5 > convoys[j].highestValue.x) { convoys[j].highestValue.x = 0.5f; convoys[j].highestValue.y = 0; convoys[j].highestValue.theta = 0; convoys[j].highestValue.subIntvl = 0.5f; } } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl; #endif break; } else if (it1 != INT_MAX) { int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; //check if this x value is already contained if(h_duplicate[j]) { convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = 0; 
convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0; convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5 > convoys[j].highestValue.x) { convoys[j].highestValue.x = 0.5f; convoys[j].highestValue.y = 0; convoys[j].highestValue.theta = 0; convoys[j].highestValue.subIntvl = 0.5f; } } int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV; convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2; convoys[j].endIndexID = IDindex; if(IDindex == convoys[j].startIndexID) { convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV; } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl; #endif break; } } if(!convoyFound) { int cIndex = endIndexConvoys; convoys[cIndex].ID = convoyID++; convoys[cIndex].participatingVehicles[0] = id1; convoys[cIndex].participatingVehicles[1] = id2; convoys[cIndex].startIndexID = 0; convoys[cIndex].endIndexID = 2; convoys[cIndex].startIndexTracks = 0; convoys[cIndex].endIndexTracks = 1; convoys[cIndex].tracks[0].x = 0.5; convoys[cIndex].tracks[0].y = 0; convoys[cIndex].tracks[0].theta = 0; convoys[cIndex].tracks[0].subIntvl = 0.5; endIndexConvoys = (endIndexConvoys+1)%NUM_CONV; convoys[cIndex].highestValue.x = 0.5; convoys[cIndex].highestValue.y = 0; convoys[cIndex].highestValue.theta = 0; convoys[cIndex].highestValue.subIntvl = 0.5; if(convoySize == NUM_CONV) { startIndexConvoys = (startIndexConvoys+1)%NUM_CONV; } else { ++convoySize; } #ifdef PRINT std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl; #endif } } bool ConvoyTracker::checkConvoyForY(float y, float x, Convoy c) { double min = INT_MAX; double dist; int index; for(int i=c.startIndexTracks; i != c.endIndexTracks; i = (i+1)%MAX_LENGTH_HIST_CONV) { dist = fabsf(c.tracks[i].x - x); if(dist < min) { min = dist; index = i; } } dist = fabsf(c.tracks[index].y - y); if(dist > CONVOY_THRESHOLD_Y) { return false; } return true; }
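The .hip file above and the .cu file below differ mainly in the runtime API prefix (hip* vs cuda*) and in kernel launch syntax: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL. A minimal illustration of that launch-syntax mapping (not part of either file; the kernel and variable names are hypothetical):

#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void scaleKernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 256;
    float h_data[n];
    for (int i = 0; i < n; ++i) h_data[i] = 1.0f;
    float* d_data = nullptr;
    hipMalloc(&d_data, n * sizeof(float));
    hipMemcpy(d_data, h_data, n * sizeof(float), hipMemcpyHostToDevice);
    // CUDA form (as in the .cu file below):  scaleKernel<<<dim3(1), dim3(n), 0, 0>>>(d_data, 2.0f, n);
    // HIP form emitted by hipify (as used throughout the .hip file above):
    hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(n), 0, 0, d_data, 2.0f, n);
    hipMemcpy(h_data, d_data, n * sizeof(float), hipMemcpyDeviceToHost);
    printf("h_data[0] = %f\n", h_data[0]);  // expected: 2.0
    hipFree(d_data);
    return 0;
}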
4f06112d08711738de05b4c8402563706cfe6841.cu
/* * ConvoyTracker.cu * * Created on: 06.06.2016 * Author: Sebastian Reinhart * * FOR DETAILED COMMENTS SEE CONVOY_TRACKER_ZERO_COPY */ #include "ConvoyTracker.cuh" #include <assert.h> ConvoyTracker::ConvoyTracker() { currentSpeed = 0; currentYawRate = 0; x = 0; y = 0; yaw = 0; xOld = 0; yOld = 0; yawOld = 0; ID = 0; convoyID = 0; currentHistoryOnDevice = false; currentConvoyOnDevice = false; convoySize = 0; startIndexConvoys = 0; endIndexConvoys = 0; historySize = 0; startIndexHistory = 0; endIndexHistory = 0; convoyCheckSize = 0; intervalSize = 0; cudaError_t error; cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); cudaStreamCreate(&stream5); error = cudaHostAlloc((void**) &history, NUM_HIST*sizeof(History), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_history, NUM_HIST*sizeof(History)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_historyMatch, MAX_SEGMENTS*sizeof(int), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_historyMatch, MAX_SEGMENTS*sizeof(int)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_historyMatchSelf, sizeof(int), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_historyMatchSelf, sizeof(int)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_intervalMap, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_intervalMap, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_convoyCheck, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_newVeh, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_IDincluded, NUM_HIST*2*sizeof(int), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_IDincluded, NUM_HIST*2*sizeof(int)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_duplicate, NUM_HIST*sizeof(bool), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), 
line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_duplicate, NUM_HIST*sizeof(bool)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_updateData, MAX_SEGMENTS*3*sizeof(float), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_updataData, MAX_SEGMENTS*3*sizeof(float)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_intvlIndex, MAX_SEGMENTS*sizeof(int), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_intvlIndex, MAX_SEGMENTS*sizeof(int)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_vehicles, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_vehicles, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &h_distance, MAX_SEGMENTS*MAX_SEGMENTS*sizeof(float), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_distance, MAX_SEGMENTS*MAX_SEGMENTS*sizeof(float)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } size_t sizeConv = NUM_CONV; sizeConv *= sizeof(Convoy); error = cudaHostAlloc((void **) &convoys, sizeConv, cudaHostAllocDefault); if(error != cudaSuccess) { printf( "cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **) &d_convoys, NUM_CONV*sizeof(Convoy)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaHostAlloc((void**) &xSubInterval, sizeof(float), cudaHostAllocMapped); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } *xSubInterval = 0; error = cudaHostGetDevicePointer(&d_subIntvl_ptr, xSubInterval, 0); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } } ConvoyTracker::~ConvoyTracker() { cudaFreeHost(xSubInterval); cudaFreeHost(convoys); cudaFreeHost(history); cudaFreeHost(h_historyMatch); cudaFreeHost(h_convoyCheck); cudaFreeHost(h_intervalMap); cudaFreeHost(h_historyMatchSelf); cudaFreeHost(h_IDincluded); cudaFreeHost(h_vehicles); cudaFreeHost(h_distance); cudaFreeHost(h_duplicate); cudaFreeHost(h_intvlIndex); cudaFree(d_convoys); cudaFree(d_history); cudaFree(d_historyMatch); cudaFree(d_newVeh); cudaFree(d_intervalMap); cudaFree(d_historyMatchSelf); 
cudaFree(d_IDincluded); cudaFree(d_vehicles); cudaFree(d_distance); cudaFree(d_duplicate); cudaFree(d_intvlIndex); cudaStreamDestroy(stream2); cudaStreamDestroy(stream3); cudaStreamDestroy(stream4); cudaStreamDestroy(stream5); } std::string getNextMeasureAsString(int i) { std::ostringstream number; if(i<10) { number << "000" << i; } else if(i<100) { number << "00" << i; } else if(i<1000) { number << "0" << i; } else { number << i; } return number.str(); } __device__ void shiftRotateHistory(History* d_pc, float x, float y, float theta, int index) { //update history if(((index < d_pc->endIndex) && (d_pc->endIndex > d_pc->startIndex)) || ((d_pc->endIndex < d_pc->startIndex) && (index != d_pc->endIndex))) { d_pc->tracks[index].subIntvl += x; int numIntervals = (int) ((d_pc->tracks[index].subIntvl) / INTERVALL_LENGTH); d_pc->tracks[index].x -= numIntervals; d_pc->tracks[index].subIntvl -= numIntervals; float angleInRadians = theta*((float)M_PI)/180.0f; float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; //update history d_pc->tracks[index].y -= y; d_pc->tracks[index].theta -= angleInRadians; float xAbs = d_pc->tracks[index].x; float yAbs = d_pc->tracks[index].y; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; d_pc->tracks[index].y -= yAbs; d_pc->tracks[index].subIntvl -= xAbs; } } __device__ void shiftRotateConvoy(Convoy* d_eml, float x, float y, float theta, int index) { if(((index < d_eml->endIndexTracks) && (d_eml->endIndexTracks > d_eml->startIndexTracks)) || ((d_eml->endIndexTracks < d_eml->startIndexTracks) && (index != d_eml->endIndexTracks))) { d_eml->tracks[index].subIntvl += x; int numIntervals = (int) ((d_eml->tracks[index].subIntvl) / INTERVALL_LENGTH); d_eml->tracks[index].x -= numIntervals; d_eml->tracks[index].subIntvl -= numIntervals; float angleInRadians = theta*((float)M_PI)/180.0f; float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; d_eml->tracks[index].y -= y; d_eml->tracks[index].theta -= angleInRadians; float xAbs = d_eml->tracks[index].x; float yAbs = d_eml->tracks[index].y; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; d_eml->tracks[index].y -= yAbs; d_eml->tracks[index].subIntvl -= xAbs; } } __device__ void computeIntervalMap(PointCellDevice* d_interval, float xMotion, float yMotion, float angle, float* xSubInterval) { float angleInRadians = angle * ((float)M_PI) / 180.0f; *xSubInterval += xMotion; int numIntervals = (int) (*xSubInterval / INTERVALL_LENGTH); *xSubInterval -= numIntervals; for (int i = 0; i < numIntervals; i++) { float x = d_interval->getX(); int interval = floor(x) + CARINTERVAL; if(interval == 0) { //delete content d_interval->setX(-10000); continue; } d_interval->setX(floor(x) - 0.5f); } int interval = floor(d_interval->getX()); //1.Step correct directions of stored PCs d_interval->setY(d_interval->getY() - yMotion); d_interval->setTheta(d_interval->getTheta() - angleInRadians); //2. 
compensate rotation float xAbs = ( interval - CARINTERVAL + 0.5f) * INTERVALL_LENGTH - *xSubInterval; float yAbs = d_interval->getY(); float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; d_interval->setY(d_interval->getY() - yAbs); if(xAbs > 0.5f*INTERVALL_LENGTH) { d_interval->setX(interval + 1.5f); } else if(xAbs < -0.5f*INTERVALL_LENGTH) { d_interval->setX(interval - 0.5f); } else { d_interval->subInvtl -= xAbs; } } __device__ bool findHistoryMatch(PointCellDevice* trackedVehicles, History* d_history, int index) { bool result = (d_history->ID != trackedVehicles->getID()); result = (result && (d_history->tracks[index].x - 0.5f <= trackedVehicles->getX())); result = (result && (trackedVehicles->getX() <= d_history->tracks[index].x + 0.5f)); result = (result && (d_history->tracks[index].y - 1.0f <= trackedVehicles->getY())); result = (result && (trackedVehicles->getY() <= d_history->tracks[index].y + 1.0f)); return result; } __device__ bool findHistoryMatchSelf(History* d_history, int index) { bool result = true; result = (result && (d_history->tracks[index].x - 0.5f <= 0.0f)); result = (result && (0 <= d_history->tracks[index].x + 0.5f)); result = (result && (d_history->tracks[index].y - 1.0f <= 0.0f)); result = (result && (0 <= d_history->tracks[index].y + 1.0f)); return result; } __device__ void predictDevice(PointCellDevice* vehicle, int index) { int state = index%5; //row int i = index / 5 ; //column int j = state; vehicle->data[state+5] = vehicle->data[state]; __syncthreads(); vehicle->computeF(); vehicle->computeCovarianceF(); float tmp = 0; //Tmp = F*P for(int k=0; k<5; k++) { tmp += vehicle->getF(i,k)*vehicle->getP(k,j); } vehicle->writeTmp(i,j, tmp); __syncthreads(); //P = tmp*F_t tmp = 0; for(int k=0; k<5; k++) { tmp += vehicle->getTmp(i,k)*vehicle->getF(j,k); } vehicle->writeP(i,j, tmp); __syncthreads(); //P = P+Q tmp = vehicle->getP(i,j) + vehicle->getQ(i,j); vehicle->writeP(i,j, tmp); } __global__ void compensateEgoMotionMap(PointCellDevice* d_interval, float* d_subIntvl, float x, float y, float angle) { computeIntervalMap(&(d_interval[threadIdx.x]), x, y, angle, d_subIntvl); } __global__ void compensateEgoMotionHistory(History* d_history, float x, float y, float angle) { shiftRotateHistory(&(d_history[blockIdx.x]), x, y, angle, threadIdx.x); } __global__ void compensateEgoMotionConvoy(Convoy* d_convoy, float x, float y, float angle) { shiftRotateConvoy(&(d_convoy[blockIdx.x]), x, y, angle, threadIdx.x); } __global__ void findConvoyDevice(PointCellDevice* trackedVehicles, History* d_history, int* d_historyMatch) { if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex))) { if(findHistoryMatch(&(trackedVehicles[blockIdx.y]),&(d_history[blockIdx.x]),threadIdx.x)) { atomicMin(&(d_historyMatch[blockIdx.y]), d_history[blockIdx.x].ID); } } } __global__ void findConvoyDeviceSelf(History* d_history, int* d_historyMatchSelf) { if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex))) { 
if(findHistoryMatchSelf(&(d_history[blockIdx.x]),threadIdx.x)) { atomicMin(d_historyMatchSelf, d_history[blockIdx.x].ID); } } } __global__ void memSetHistoryMatch(int* d_historyMatch) { d_historyMatch[threadIdx.x] = INT_MAX; } /* * Run Kalman-Filter Predict on Device with #vehicles as Blocks and 25 Threads per Block */ __global__ void predict(PointCellDevice* d_interval) { predictDevice(&(d_interval[blockIdx.x]), threadIdx.x); } /* * Run Kalman-Filter Update on Device with 25 Threads */ __device__ void updateDevice(PointCellDevice* d_interval, int index, float velocity, float phi, float xNew, float yNew, float thetaNew) { //row int i = index / 5; //column int j = index % 5; float tmp = 0; //tmp = H*P for(int k=0; k<5; k++) { tmp += d_interval->getH(i,k)*d_interval->getP(k,j); } d_interval->writeTmp(i,j, tmp); __syncthreads(); //S = tmp*H_t tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getTmp(i,k)*d_interval->getH(j,k); } d_interval->writeS(i,j, tmp); __syncthreads(); //S = S+R tmp = d_interval->getS(i,j) + d_interval->getR(i,j); d_interval->writeS(i,j, tmp); __syncthreads(); //tmp = P*H_t tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getP(i,k)*d_interval->getH(j,k); } d_interval->writeTmp(i,j, tmp); __syncthreads(); //invertS if(threadIdx.x == 0) { d_interval->invertS(); } __syncthreads(); //K = tmp*S_i tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getTmp(i,k)*d_interval->getS(k,j); } d_interval->writeK(i,j, tmp); __syncthreads(); //tmp = K*(newState-stateVector) tmp = 0; tmp += d_interval->getK(i,0)*(xNew-d_interval->getX()); tmp += d_interval->getK(i,1)*(yNew-d_interval->getY()); tmp += d_interval->getK(i,2)*(thetaNew-d_interval->getTheta()); tmp += d_interval->getK(i,3)*(velocity-d_interval->getVelocity()); tmp += d_interval->getK(i,4)*(phi-d_interval->getPhi()); d_interval->writeTmp(i,j, tmp); __syncthreads(); //stateVector = stateVector + tmp if(threadIdx.x == 0) { d_interval->setX(d_interval->getX() + d_interval->getTmp(0,0)); d_interval->setY(d_interval->getY() + d_interval->getTmp(1,0)); d_interval->setTheta(d_interval->getTheta() + d_interval->getTmp(2,0)); d_interval->setVelocity(d_interval->getVelocity() + d_interval->getTmp(3,0)); d_interval->setPhi(d_interval->getPhi() + d_interval->getTmp(4,0)); } __syncthreads(); //tmp = K*H tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getK(i,k)*d_interval->getH(k,j); } d_interval->writeTmp(i,j, tmp); __syncthreads(); //tmp = I- tmp tmp = d_interval->getI(i,j) - d_interval->getTmp(i,j); d_interval->writeTmp(i,j, tmp); __syncthreads(); //tmp2 = tmp*P tmp = 0; for(int k=0; k<5; k++) { tmp += d_interval->getTmp(i,k)*d_interval->getP(k,j); } d_interval->writeTmp2(i,j, tmp); __syncthreads(); d_interval->writeP(i,j, d_interval->getTmp2(i,j)); } __global__ void updateOne(PointCellDevice* d_interval, int index, float velocity, float phi, float xNew, float yNew, float thetaNew) { updateDevice(d_interval,threadIdx.x,velocity, phi,xNew,yNew,thetaNew); } __global__ void updateKernel(PointCellDevice* d_intvl, float* d_updateData, int* d_intvlIndex) { int index = d_intvlIndex[blockIdx.x]; float xNew = d_updateData[blockIdx.x*3]; float yNew = d_updateData[blockIdx.x*3+1]; float thetaNew = d_updateData[blockIdx.x*3+2]; float x = d_intvl[index].data[5]; float y = d_intvl[index].data[6]; float theta = d_intvl[index].data[7]; float velocity = sqrtf((xNew - x) * (xNew - x) + (yNew - y)*(yNew - y)) / TIMESTAMP; float phi = (thetaNew-theta) / TIMESTAMP; if(threadIdx.x == 0) { d_intvl[index].setVelocity(velocity); 
d_intvl[index].setPhi(phi); } updateDevice(&(d_intvl[index]),threadIdx.x,velocity, phi,xNew,yNew,thetaNew); } __global__ void findIDInConvoyDevice(Convoy* d_convoy, int* d_IDIncluded, int id1, int id2) { if(((threadIdx.x < d_convoy[blockIdx.x].endIndexID) && (d_convoy[blockIdx.x].endIndexID > d_convoy[blockIdx.x].startIndexID)) || ((d_convoy[blockIdx.x].endIndexID < d_convoy[blockIdx.x].startIndexID) && (threadIdx.x != d_convoy[blockIdx.x].endIndexID))) { int index = blockIdx.x*2; d_IDIncluded[index] = INT_MAX; d_IDIncluded[index+1] = INT_MAX; __syncthreads(); int result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id1); if(result) { atomicMin(&(d_IDIncluded[index]), threadIdx.x); } result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id2); if(result) { atomicMin(&(d_IDIncluded[index+1]), threadIdx.x); } result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == -1); if(result) { atomicMin(&(d_IDIncluded[index+1]), INT_MIN); atomicMin(&(d_IDIncluded[index]), INT_MIN); } } } __global__ void checkConvoyForDuplicateDevice(Convoy* d_convoy, PointCellDevice* d_vehicle, bool* d_duplicate) { if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks))) { d_duplicate[blockIdx.x] = true; __syncthreads(); bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != (floor(d_vehicle->getX())+0.5f)); if(!result) { d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result; } } } __global__ void checkHistoryForDuplicateDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate) { if(((threadIdx.x < d_history[d_IDincluded[blockIdx.x]].endIndex) && (d_history[d_IDincluded[blockIdx.x]].endIndex > d_history[d_IDincluded[blockIdx.x]].startIndex)) || ((d_history[d_IDincluded[blockIdx.x]].endIndex < d_history[d_IDincluded[blockIdx.x]].startIndex) && (threadIdx.x != d_history[d_IDincluded[blockIdx.x]].endIndex))) { d_duplicate[blockIdx.x] = true; int index = d_intvlIndex[blockIdx.x]; int intvl = floor(d_intvl[index].getX()); intvl += 0.5f; if(d_history[d_IDincluded[blockIdx.x]].tracks[threadIdx.x].x == intvl) { d_duplicate[blockIdx.x] = false; } } } __global__ void checkConvoyForDuplicateDeviceSelf(Convoy* d_convoy, bool* d_duplicate) { if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks))) { d_duplicate[blockIdx.x] = true; bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != 0.5f); if(!result) { d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result; } } } __global__ void findHistoryWithIDDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded) { int index = d_intvlIndex[threadIdx.x]; int ID = d_intvl[index].getID(); if(d_history[blockIdx.x].ID == ID) { d_IDincluded[threadIdx.x] = blockIdx.x; } } __global__ void addUpdatedPositionToHistoryDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate) { int intvl = floor(d_intvl[d_intvlIndex[threadIdx.x]].getX()); d_intvl[d_intvlIndex[threadIdx.x]].setX(intvl+ 0.5f); int historyIndex = d_IDincluded[threadIdx.x]; if(d_duplicate[threadIdx.x]) { 
int index = d_history[historyIndex].endIndex; d_history[historyIndex].tracks[index].subIntvl = 0.5f; d_history[historyIndex].tracks[index].x = d_intvl[d_intvlIndex[threadIdx.x]].getX(); d_history[historyIndex].tracks[index].y = d_intvl[d_intvlIndex[threadIdx.x]].getY(); d_history[historyIndex].tracks[index].theta = d_intvl[d_intvlIndex[threadIdx.x]].getTheta(); index = (index+1)%MAX_LENGTH_HIST_CONV; d_history[historyIndex].endIndex = index; if(index == d_history[historyIndex].startIndex) { d_history[historyIndex].startIndex = (d_history[historyIndex].startIndex+1)%NUM_HIST; } } } __global__ void computeDistancesDevice(PointCellDevice* d_vehicles, PointCellDevice* d_intvl, float* d_distance) { int index = blockIdx.x*MAX_SEGMENTS + threadIdx.x; float x = d_vehicles[blockIdx.x].getX(); float y = d_vehicles[blockIdx.x].getY(); float theta = d_vehicles[blockIdx.x].getTheta(); float x1 = d_intvl[threadIdx.x].getX(); float y1 = d_intvl[threadIdx.x].getY(); float theta1 = d_intvl[threadIdx.x].getTheta(); d_distance[index] = sqrtf((x - x1)*(x - x1) + (y - y1)*(y - y1) + (theta - theta1)*(theta - theta1)); } int main() { #ifdef CREATE_MEASURES PGMReader pgmReader; double speed = 4.0/3.0; for(int i=0; i<NUM_MEASUREMENT; i++) { std::string number = getNextMeasureAsString(i); pgmReader.simulateLaserRays(number); /* std::ofstream EMLMeasureFile; std::ostringstream measurePath; measurePath << "./Laserdata/EML" << number << ".txt"; EMLMeasureFile.open (measurePath.str().c_str()); EMLMeasureFile << ((double)i)*speed << " 0 0 120 0" << std::endl; EMLMeasureFile.close();*/ } #endif int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (deviceProp.canMapHostMemory != 1){ fprintf(stderr, "Device cannot map memory!\n"); return 1; } if (error != cudaSuccess) { printf( "cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } else { #ifdef PRINT printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); #endif } cudaSetDeviceFlags(cudaDeviceMapHost); cudaEvent_t startEvent, stopEvent, start2Event, stop2Event; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); cudaEventCreate(&start2Event); cudaEventCreate(&stop2Event); float time = 0; cudaEventRecord(startEvent, 0); ConvoyTracker tracker; cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time,startEvent,stopEvent); #if SZENARIO == 6 std::vector<PointCellDevice> vehiclesSim; error = cudaHostAlloc((void**) &tracker.h_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocDefault); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaMalloc((void **)&tracker.d_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice)); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } for(uint i=0;i <20; i++) { tracker.h_vehicleSim[i].initializeMemory(); if(i%2 == 0) { tracker.h_vehicleSim[i].setY(-3); 
tracker.h_vehicleSim[i].setVelocity(38.9); } else { tracker.h_vehicleSim[i].setY(3); tracker.h_vehicleSim[i].setVelocity(27.8); } tracker.h_vehicleSim[i].setX((i/2)*8); tracker.h_vehicleSim[i].setTheta(0); tracker.h_vehicleSim[i].setPhi(0); } cudaMemcpy(tracker.d_vehicleSim, tracker.h_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice), cudaMemcpyHostToDevice); #endif cudaEventRecord(startEvent, 0); int vehicleCount = 0; float compensateHistory[NUM_MEASUREMENT]; for(int i=0; i<NUM_MEASUREMENT; i++) { cudaEventRecord(start2Event, 0); std::vector<PointCellDevice*> trackedVehicles; std::string number = getNextMeasureAsString(i); tracker.readEMLData(number); //1. Compensate own vehicle motion double deltaX = tracker.getX() - tracker.getXOld(); double deltaY = tracker.getY() - tracker.getYOld(); double deltaYaw = tracker.getYaw() - tracker.getYawOld(); float angleInRadians = ((float)deltaYaw) * ((float)M_PI) / 180.0f; float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) }, { sinf(angleInRadians), cosf(angleInRadians) } }; tracker.transformDataToDevice(); if(tracker.historySize > 0) { compensateEgoMotionHistory<<<tracker.historySize, MAX_LENGTH_HIST_CONV,0, tracker.stream2>>>(tracker.d_history, deltaX, deltaY, deltaYaw); } vehicleCount = tracker.reader.processLaserData(number,tracker.getCurrentSpeed(), tracker.getCurrentYawRate(), tracker.h_vehicles); if(tracker.convoySize > 0) { compensateEgoMotionConvoy<<<tracker.convoySize, MAX_LENGTH_HIST_CONV,0, tracker.stream3>>>(tracker.d_convoys, deltaX, deltaY, deltaYaw); } if(tracker.intervalSize > 0) { compensateEgoMotionMap<<<1,tracker.intervalSize,0,tracker.stream4>>>(tracker.d_intervalMap, tracker.d_subIntvl_ptr, deltaX, deltaY, deltaYaw); predict<<<tracker.intervalSize,25,0,tracker.stream4>>>(tracker.d_intervalMap); } tracker.transformDataFromDevice(); for(uint k = 0; k < tracker.convoySize; k++) { tracker.convoys[k].highestValue.subIntvl += ((float)deltaX); int numIntervals = (int) ((tracker.convoys[k].highestValue.subIntvl) / INTERVALL_LENGTH); tracker.convoys[k].highestValue.x -= numIntervals; tracker.convoys[k].highestValue.subIntvl -= numIntervals; tracker.convoys[k].highestValue.y -= deltaY; tracker.convoys[k].highestValue.theta -= angleInRadians; float xAbs = tracker.convoys[k].highestValue.x; float yAbs = tracker.convoys[k].highestValue.y; xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs; yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs; tracker.convoys[k].highestValue.y -= yAbs; tracker.convoys[k].highestValue.subIntvl -= xAbs; } for(uint j=0; j<tracker.intervalSize;j++) { trackedVehicles.push_back(&(tracker.h_intervalMap[j])); } //3. 
Associate and Update #if SZENARIO == 6 tracker.associateAndUpdate(20, trackedVehicles); #else tracker.associateAndUpdate(vehicleCount, trackedVehicles); #endif cudaEventRecord(stop2Event, 0); cudaEventSynchronize(stop2Event); float time3; cudaEventElapsedTime(&time3, start2Event, stop2Event); compensateHistory[i] = time3; } cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); float sumH = 0; for(int i = 0; i< NUM_MEASUREMENT; i++) { sumH += compensateHistory[i]; } sumH /= NUM_MEASUREMENT; #ifdef PRINT std::cout << "Duration of compensate History: " << sumH << std::endl; #endif float time2; cudaEventElapsedTime(&time2, startEvent, stopEvent); #ifdef PRINT std::cout << "Overall Time: " << time +time2<< std::endl; #else std::cout << time + time2 << std::endl; #endif #if SZENARIO == 6 cudaFreeHost(tracker.h_vehicleSim); #endif tracker.visualizeConvoys(); tracker.visualizeHistory(); return 0; } /** * Stores current Speed and yaw rate from file to class variables */ void ConvoyTracker::readEMLData(std::string number) { std::ostringstream measurePath; measurePath << EMLPATH << number << ".txt"; #ifdef PRINT std::cout << measurePath.str() << std::endl; #endif std::ifstream input(measurePath.str().c_str()); std::string line; std::string segment; if(std::getline( input, line )) { std::stringstream ss; ss << line; int dataCnt = 1; while(std::getline(ss, segment, ' ')) { switch (dataCnt) { case 1: { //x in m xOld = x; x = atof(segment.c_str()); ++dataCnt; break; } case 2: { //y in m yOld = y; y = atof(segment.c_str()); ++dataCnt; break; } case 3: { //yaw in ° yawOld = yaw; yaw = atof(segment.c_str()); ++dataCnt; break; } case 4: { //velocity in kmh //Compute value in m/s currentSpeed = atof(segment.c_str()) / 3.6; ++dataCnt; break; } case 5: { //yaw rate in °/s //Compute value in rad/s currentYawRate = atof(segment.c_str()) * M_PI / 180.0; break; } } } EMLPos curPos; curPos.x = x; curPos.y = y; curPos.theta = yaw; EML.push_back(curPos); } } double ConvoyTracker::getCurrentSpeed() const { return currentSpeed; } void ConvoyTracker::setCurrentSpeed(double currentSpeed) { this->currentSpeed = currentSpeed; } double ConvoyTracker::getCurrentYawRate() const { return currentYawRate; } void ConvoyTracker::setCurrentYawRate(double currentYawRate) { this->currentYawRate = currentYawRate; } double ConvoyTracker::getX() const { return x; } void ConvoyTracker::setX(double x) { this->x = x; } double ConvoyTracker::getXOld() const { return xOld; } void ConvoyTracker::setXOld(double old) { this->xOld = old; } double ConvoyTracker::getY() const { return y; } void ConvoyTracker::setY(double y) { this->y = y; } double ConvoyTracker::getYaw() const { return yaw; } void ConvoyTracker::setYaw(double yaw) { this->yaw = yaw; } double ConvoyTracker::getYawOld() const { return yawOld; } void ConvoyTracker::setYawOld(double yawOld) { this->yawOld = yawOld; } double ConvoyTracker::getYOld() const { return yOld; } void ConvoyTracker::setYOld(double old) { yOld = old; } /** * Searches for corresponding vehicles using Global Nearest Neighbor algorithm and updates the results */ void ConvoyTracker::associateAndUpdate(int vehicleCount, std::vector<PointCellDevice*> trackedVehicles) { //initialize all IDs in possible history to -1 to have no false detection in findConvoy memSetHistoryMatch<<<1,MAX_SEGMENTS,0,stream2>>>(d_historyMatch); convoyCheckSize = 0; int updateCounter = 0; int indexCounter = trackedVehicles.size(); std::vector<int> indicesToAdd; std::vector<PointCellDevice*> updateCheck; for(uint i = 0; 
i<vehicleCount; i++) { #if SZENARIO == 6 double x = h_vehicleSim[i].getX(); double y = h_vehicleSim[i].getY(); double theta = h_vehicleSim[i].getTheta(); #else double x = h_vehicles[i].getX(); double y = h_vehicles[i].getY(); double theta = h_vehicles[i].getTheta(); #endif double minDist = INT_MAX; int minIndex = INT_MAX; #ifdef PRINT std::cout << "X: " << x << " Y: " << y << " Theta: " << theta <<std::endl; #endif for(uint j = 0; j<trackedVehicles.size(); j++) { double x1 = trackedVehicles.at(j)->getX(); double y1 = trackedVehicles.at(j)->getY(); double theta1 = trackedVehicles.at(j)->getTheta(); #ifdef PRINT std::cout << "X1: " << x1 << " Y1: " << y1<< " Theta1: " << theta1 <<std::endl; #endif double dist = sqrt((x - x1)*(x - x1) + (y - y1)*(y - y1) + (theta - theta1)*(theta - theta1)); if(dist < minDist) { minDist = dist; minIndex = j; } } #ifdef PRINT std::cout << "Min distance: " << minDist << std::endl; #endif if(minDist > ASSOCIATION_THRESHOLD) { //do not associate vehicles with to big distance in between //create new track instead ++ID; #if SZENARIO == 6 h_vehicleSim[i].setID(ID); #else h_vehicles[i].setID(ID); #endif indicesToAdd.push_back(i); history[endIndexHistory].ID = ID; history[endIndexHistory].tracks[0].subIntvl = 0.5f; #if SZENARIO == 6 history[endIndexHistory].tracks[0].x = h_vehicleSim[i].getX(); history[endIndexHistory].tracks[0].y = h_vehicleSim[i].getY(); history[endIndexHistory].tracks[0].theta = h_vehicleSim[i].getTheta(); #else history[endIndexHistory].tracks[0].x = h_vehicles[i].getX(); history[endIndexHistory].tracks[0].y = h_vehicles[i].getY(); history[endIndexHistory].tracks[0].theta = h_vehicles[i].getTheta(); #endif history[endIndexHistory].startIndex = 0; history[endIndexHistory].endIndex = 1; int index = (endIndexHistory+1)%NUM_HIST; if(index == startIndexHistory) { startIndexHistory = (startIndexHistory+1)%NUM_HIST; } else { ++historySize; } endIndexHistory = index; #ifdef PRINT std::cout << "Added new Vehicle with ID " << ID << std::endl; #endif currentHistoryOnDevice = false; #if SZENARIO == 6 h_convoyCheck[convoyCheckSize] = h_vehicleSim[i]; #else h_convoyCheck[convoyCheckSize] = h_vehicles[i]; #endif ++convoyCheckSize; } else { //vehicle matched, update PointCellDevice* tmp = trackedVehicles.at(trackedVehicles.size() -1 ); PointCellDevice* update = trackedVehicles.at(minIndex); #ifdef PRINT std::cout << "Update ID " << update->getID() << std::endl; #endif trackedVehicles.at(minIndex) = tmp; h_intvlIndex[minIndex] = h_intvlIndex[trackedVehicles.size()-1]; h_intvlIndex[trackedVehicles.size()-1] = minIndex; #if SZENARIO == 6 h_updateData[updateCounter*3] = h_vehicleSim[i].getX(); h_updateData[updateCounter*3+1] = h_vehicleSim[i].getY(); h_updateData[updateCounter*3+2] = h_vehicleSim[i].getTheta(); #else h_updateData[updateCounter*3] = h_vehicles[i].getX(); h_updateData[updateCounter*3+1] = h_vehicles[i].getY(); h_updateData[updateCounter*3+2] = h_vehicles[i].getTheta(); #endif trackedVehicles.pop_back(); #ifdef PRINT std::cout << "Updated vehicle with ID " << update->getID() << std::endl; #endif updateCheck.push_back(update); ++updateCounter; } } for(int i=0; i<updateCounter; i++) { for(int j=0; j<intervalSize; j++) { if(updateCheck[i] == &h_intervalMap[j]) { h_intvlIndex[i] = j; break; } } } //Update all matched vehicles if(updateCounter >0) { cudaMemcpyAsync(d_history, history, historySize*sizeof(History), cudaMemcpyHostToDevice, stream4); cudaMemcpyAsync(d_intervalMap, h_intervalMap, intervalSize*sizeof(PointCellDevice), cudaMemcpyHostToDevice, 
stream5); cudaMemcpyAsync(d_updataData, h_updateData, updateCounter*3*sizeof(float), cudaMemcpyHostToDevice, stream2); cudaMemcpyAsync(d_intvlIndex, h_intvlIndex, updateCounter*sizeof(int), cudaMemcpyHostToDevice, stream3); cudaDeviceSynchronize(); updateKernel<<<updateCounter,25>>>(d_intervalMap, d_updataData, d_intvlIndex); findHistoryWithIDDevice<<<historySize,updateCounter>>>(d_history,d_intervalMap,d_intvlIndex,d_IDincluded); checkHistoryForDuplicateDevice<<<updateCounter, MAX_LENGTH_HIST_CONV>>>(d_history,d_intervalMap,d_intvlIndex,d_IDincluded,d_duplicate); addUpdatedPositionToHistoryDevice<<<1,updateCounter>>>(d_history, d_intervalMap,d_intvlIndex, d_IDincluded,d_duplicate); cudaMemcpyAsync(h_intervalMap, d_intervalMap, intervalSize*sizeof(PointCellDevice), cudaMemcpyDeviceToHost, stream3); cudaMemcpyAsync(history, d_history, historySize*sizeof(History), cudaMemcpyDeviceToHost, stream4); cudaDeviceSynchronize(); for(int i=0; i<updateCounter;i++) { h_convoyCheck[convoyCheckSize] = h_intervalMap[h_intvlIndex[i]]; ++convoyCheckSize; } } //delete all tracks that could not be matched for(uint k = 0; k < trackedVehicles.size(); k++) { PointCellDevice* tmp = trackedVehicles.at(k); for(uint m = 0; m < intervalSize; m++) { if(tmp == &h_intervalMap[m]) { h_intervalMap[m] = h_intervalMap[--intervalSize]; break; } } } //add all observations that could not be matched for(uint k = 0; k < indicesToAdd.size(); k++) { if(intervalSize < MAX_SEGMENTS) { #if SZENARIO == 6 h_intervalMap[intervalSize++] = h_vehicleSim[indicesToAdd.at(k)]; #else h_intervalMap[intervalSize++] = h_vehicles[indicesToAdd.at(k)]; #endif } } //find Convoy if(historySize > 0 && convoyCheckSize >0) { cudaMemcpyAsync(d_newVeh, h_convoyCheck, convoyCheckSize*sizeof(PointCellDevice), cudaMemcpyHostToDevice,stream2); cudaMemcpyAsync(d_history, history, historySize*sizeof(History), cudaMemcpyHostToDevice, stream4); dim3 grid(historySize, convoyCheckSize); cudaDeviceSynchronize(); findConvoyDevice<<<grid, MAX_LENGTH_HIST_CONV>>>(d_newVeh,d_history,d_historyMatch); cudaMemcpy(h_historyMatch, d_historyMatch, convoyCheckSize*sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for(uint i=0; i<convoyCheckSize;i++) { if(h_historyMatch[i] != INT_MAX) { PointCellDevice vehicle = h_convoyCheck[i]; float x = vehicle.getX(); int interval = floor(x); int id1 = h_historyMatch[i]; int id2 = vehicle.getID(); #ifdef PRINT std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl; #endif bool convoyFound = false; if(convoySize >0) { cudaMemcpy(d_convoys, convoys, convoySize*sizeof(Convoy), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); findIDInConvoyDevice<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream3>>>(d_convoys, d_IDincluded,id1,id2); checkConvoyForDuplicateDevice<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream2>>>(d_convoys, &(d_newVeh[i]),d_duplicate); cudaMemcpyAsync(h_IDincluded, d_IDincluded, convoySize*2*sizeof(int), cudaMemcpyDeviceToHost, stream3); cudaMemcpyAsync(h_duplicate, d_duplicate, convoySize*sizeof(bool), cudaMemcpyDeviceToHost, stream2); cudaDeviceSynchronize(); } for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV) { Convoy currentConvoy = convoys[j]; int it1 = h_IDincluded[j*2]; int it2 = h_IDincluded[j*2+1]; if(it1 == INT_MIN || it2 == INT_MIN) { continue; } if(it1 != INT_MAX && it2 != INT_MAX) { //convoy already exists with both IDS //check if this x value is already contained if(h_duplicate[j]) { //x value is not contained int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; 
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY(); convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta(); convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5f > convoys[j].highestValue.x) { convoys[j].highestValue.x = interval+0.5f; convoys[j].highestValue.y = vehicle.getY(); convoys[j].highestValue.theta = vehicle.getTheta(); convoys[j].highestValue.subIntvl = 0.5f; } } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl; #endif break; } } if(convoyFound) { continue; } for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV) { Convoy currentConvoy = convoys[j]; int it1 = h_IDincluded[j*2]; int it2 = h_IDincluded[j*2+1]; if(it1 == INT_MIN || it2 == INT_MIN) { continue; } if (it1 != INT_MAX) { int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; //check if this x value is already contained if(h_duplicate[j]) { convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY(); convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta(); convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5 > convoys[j].highestValue.x) { convoys[j].highestValue.x = interval+0.5f; convoys[j].highestValue.y = vehicle.getY(); convoys[j].highestValue.theta = vehicle.getTheta(); convoys[j].highestValue.subIntvl = 0.5f; } } int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV; convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2; convoys[j].endIndexID = IDindex; if(IDindex == convoys[j].startIndexID) { convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV; } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl; #endif break; } else if (it2 != INT_MAX) { //only add position to convoy if it will be the highest value or the difference in y is not so big if(interval+0.5f < convoys[j].highestValue.x && !checkConvoyForY(vehicle.getY(),interval +0.5f,currentConvoy)) { continue; } int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; //check if this x value is already contained if(h_duplicate[j]) { convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY(); convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta(); convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5f > convoys[j].highestValue.x) { convoys[j].highestValue.x = interval+0.5f; convoys[j].highestValue.y = vehicle.getY(); convoys[j].highestValue.theta = vehicle.getTheta(); 
convoys[j].highestValue.subIntvl = 0.5f; } } int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV; convoys[j].participatingVehicles[currentConvoy.endIndexID] = id1; convoys[j].endIndexID = IDindex; if(IDindex == convoys[j].startIndexID) { convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV; } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl; #endif break; } } if(!convoyFound) { int cIndex = endIndexConvoys; convoys[cIndex].ID = convoyID++; convoys[cIndex].participatingVehicles[0] = id1; convoys[cIndex].participatingVehicles[1] = id2; convoys[cIndex].startIndexID = 0; convoys[cIndex].endIndexID = 2; convoys[cIndex].startIndexTracks = 0; convoys[cIndex].endIndexTracks = 1; convoys[cIndex].tracks[0].x = interval+0.5f; convoys[cIndex].tracks[0].y = vehicle.getY(); convoys[cIndex].tracks[0].theta = vehicle.getTheta(); convoys[cIndex].tracks[0].subIntvl = 0.5f; endIndexConvoys = (endIndexConvoys+1)%NUM_CONV; convoys[cIndex].highestValue.x = interval+0.5f; convoys[cIndex].highestValue.y = vehicle.getY(); convoys[cIndex].highestValue.theta = vehicle.getTheta(); convoys[cIndex].highestValue.subIntvl = 0.5f; if(convoySize == NUM_CONV) { startIndexConvoys = (startIndexConvoys+1)%NUM_CONV; } else { ++convoySize; } #ifdef PRINT std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl; #endif } currentConvoyOnDevice = false; } } } } void ConvoyTracker::visualizeConvoys() { visualizer.visualizeConvoys(EML, convoys, startIndexConvoys, endIndexConvoys); } void ConvoyTracker::visualizeHistory() { visualizer.visualizeHistory(EML, history, startIndexHistory, endIndexHistory); } void ConvoyTracker::transformDataToDevice() { cudaMemcpyAsync(d_history, history, historySize*sizeof(History), cudaMemcpyHostToDevice,stream2); cudaMemcpyAsync(d_intervalMap, h_intervalMap, intervalSize*sizeof(PointCellDevice), cudaMemcpyHostToDevice,stream4); *h_historyMatchSelf = INT_MAX; cudaMemcpyAsync(d_historyMatchSelf, h_historyMatchSelf, sizeof(int), cudaMemcpyHostToDevice,stream5); cudaMemcpyAsync(d_convoys, convoys, convoySize*sizeof(Convoy), cudaMemcpyHostToDevice,stream3); cudaDeviceSynchronize(); } void ConvoyTracker::transformDataFromDevice() { cudaDeviceSynchronize(); std::vector<int> toDelete; cudaMemcpyAsync(history, d_history, historySize*sizeof(History), cudaMemcpyDeviceToHost,stream2); cudaMemcpyAsync(h_intervalMap, d_intervalMap, intervalSize*sizeof(PointCellDevice), cudaMemcpyDeviceToHost,stream4); cudaMemcpyAsync(convoys, d_convoys, convoySize*sizeof(Convoy), cudaMemcpyDeviceToHost,stream3); cudaDeviceSynchronize(); if(historySize > 0) { *h_historyMatchSelf = INT_MAX; findConvoyDeviceSelf<<<historySize, MAX_LENGTH_HIST_CONV>>>(d_history, d_historyMatchSelf); cudaMemcpy(h_historyMatchSelf, d_historyMatchSelf, sizeof(int), cudaMemcpyDeviceToHost); if(*h_historyMatchSelf != INT_MAX) { findConvoySelf(*h_historyMatchSelf); } } int end; for (int i = startIndexHistory; i != endIndexHistory; i = (i+1)%NUM_HIST) { int endId = (history[i].endIndex-1)%MAX_LENGTH_HIST_CONV; if(endId <0) { endId = MAX_LENGTH_HIST_CONV-1; } if(history[i].tracks[endId].x < -5) { //if yes, mark history to delete #ifdef PRINT std::cout << "Delete history with ID " << history[i].ID << std::endl; #endif toDelete.push_back(i); } } if(toDelete.size() > 0) { for(int i=toDelete.size()-1; 
i>=0; i--) { end = (endIndexHistory-1)%NUM_HIST; if(end < 0) { end = NUM_HIST-1; } if(toDelete.at(i) != end) { history[toDelete.at(i)] = history[end]; } endIndexHistory = end; --historySize; } } toDelete.clear(); for (int i = startIndexConvoys; i != endIndexConvoys; i = (i + 1) % NUM_CONV) { end = (convoys[i].endIndexTracks-1) % MAX_LENGTH_HIST_CONV; if(end < 0) { end = MAX_LENGTH_HIST_CONV-1; } if(convoys[i].highestValue.x < -5) { #ifdef PRINT std::cout << "delete convoy with ID " << convoys[i].ID << std::endl; #endif toDelete.push_back(i); } } if(toDelete.size() > 0) { for(int i=toDelete.size()-1; i >=0; i--) { end = (endIndexConvoys-1) % NUM_CONV; if(end < 0) { end = NUM_CONV-1; } convoys[toDelete.at(i)] = convoys[end]; endIndexConvoys = end; --convoySize; } } toDelete.clear(); for(uint i=0; i<intervalSize;i++) { if(h_intervalMap[i].getX() < -100) { toDelete.push_back(i); } } if(toDelete.size() > 0) { for(int i=toDelete.size()-1; i>=0;i--) { h_intervalMap[toDelete.at(i)] = h_intervalMap[--intervalSize]; } } } void ConvoyTracker::findConvoySelf(int ID) { double x = 0; int interval = floor(x); int id1 = -1; int id2 = ID; #ifdef PRINT std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl; #endif bool convoyFound = false; if(convoySize >0) { cudaMemcpy(d_convoys, convoys, convoySize*sizeof(Convoy), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); findIDInConvoyDevice<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream3>>>(d_convoys, d_IDincluded,id1,id2); checkConvoyForDuplicateDeviceSelf<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream2>>>(d_convoys,d_duplicate); cudaMemcpyAsync(h_IDincluded, d_IDincluded, convoySize*2*sizeof(int), cudaMemcpyDeviceToHost, stream3); cudaMemcpyAsync(h_duplicate, d_duplicate, convoySize*sizeof(bool), cudaMemcpyDeviceToHost, stream2); cudaDeviceSynchronize(); } for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV) { Convoy currentConvoy = convoys[j]; int it1 = h_IDincluded[j*2]; int it2 = h_IDincluded[j*2+1]; if(it1 != INT_MAX && it2 != INT_MAX) { //convoy already exists with both IDS //check if this x value is already contained if(h_duplicate[j]) { //x value is not contained int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = 0; convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0; convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5 > convoys[j].highestValue.x) { convoys[j].highestValue.x = 0.5f; convoys[j].highestValue.y = 0; convoys[j].highestValue.theta = 0; convoys[j].highestValue.subIntvl = 0.5f; } } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl; #endif break; } else if (it1 != INT_MAX) { int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV; //check if this x value is already contained if(h_duplicate[j]) { convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f; convoys[j].tracks[currentConvoy.endIndexTracks].y = 0; convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0; convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f; convoys[j].endIndexTracks = index; if(index == convoys[j].startIndexTracks) { convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV; } if(interval+0.5 > convoys[j].highestValue.x) { 
convoys[j].highestValue.x = 0.5f; convoys[j].highestValue.y = 0; convoys[j].highestValue.theta = 0; convoys[j].highestValue.subIntvl = 0.5f; } } int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV; convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2; convoys[j].endIndexID = IDindex; if(IDindex == convoys[j].startIndexID) { convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV; } convoyFound = true; #ifdef PRINT std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl; #endif break; } } if(!convoyFound) { int cIndex = endIndexConvoys; convoys[cIndex].ID = convoyID++; convoys[cIndex].participatingVehicles[0] = id1; convoys[cIndex].participatingVehicles[1] = id2; convoys[cIndex].startIndexID = 0; convoys[cIndex].endIndexID = 2; convoys[cIndex].startIndexTracks = 0; convoys[cIndex].endIndexTracks = 1; convoys[cIndex].tracks[0].x = 0.5; convoys[cIndex].tracks[0].y = 0; convoys[cIndex].tracks[0].theta = 0; convoys[cIndex].tracks[0].subIntvl = 0.5; endIndexConvoys = (endIndexConvoys+1)%NUM_CONV; convoys[cIndex].highestValue.x = 0.5; convoys[cIndex].highestValue.y = 0; convoys[cIndex].highestValue.theta = 0; convoys[cIndex].highestValue.subIntvl = 0.5; if(convoySize == NUM_CONV) { startIndexConvoys = (startIndexConvoys+1)%NUM_CONV; } else { ++convoySize; } #ifdef PRINT std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl; #endif } } bool ConvoyTracker::checkConvoyForY(float y, float x, Convoy c) { double min = INT_MAX; double dist; int index; for(int i=c.startIndexTracks; i != c.endIndexTracks; i = (i+1)%MAX_LENGTH_HIST_CONV) { dist = fabsf(c.tracks[i].x - x); if(dist < min) { min = dist; index = i; } } dist = fabsf(c.tracks[index].y - y); if(dist > CONVOY_THRESHOLD_Y) { return false; } return true; }
9359916fbb45a17d347be8669ea6e3b91d1fc5e4.hip
// !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>

#include "init/init.hpp"
#include "prefixsum/args.hpp"
#include "utils/utils.hpp"

#include <thrust/execution_policy.h>
#include <thrust/scan.h>

static void THRUST_SEGMENTED2_PREFIXSUM(benchmark::State &state) {
  const size_t num_segments = state.range(0);
  const size_t segment_size = state.range(1);
  const size_t num_elements = num_segments * segment_size;

  hipEvent_t start, stop;
  half *d_in_fp16 = nullptr;
  half *d_out     = nullptr;

  try {
    PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
    PRINT_IF_ERROR(hipMalloc(&d_out, num_elements * sizeof(half)));

    cuda_memory_set(d_in_fp16, 0.001f, num_elements);

    PRINT_IF_ERROR(hipDeviceSynchronize());

    PRINT_IF_ERROR(hipEventCreate(&start));
    PRINT_IF_ERROR(hipEventCreate(&stop));
    defer(hipEventDestroy(start));
    defer(hipEventDestroy(stop));

    for (auto _ : state) {
      PRINT_IF_ERROR(hipEventRecord(start));

      for (size_t ii = 0; ii < num_segments; ii++) {
        thrust::inclusive_scan(thrust::device,
                               d_in_fp16 + ii * segment_size,
                               d_in_fp16 + (ii + 1) * segment_size,
                               d_out + ii * segment_size);
      }

      PRINT_IF_ERROR(hipEventRecord(stop));
      PRINT_IF_ERROR(hipEventSynchronize(stop));

      state.PauseTiming();

      float msecTotal = 0.0f;
      PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);

      state.ResumeTiming();
    }

    state.counters.insert({{"num_segments", num_segments},
                           {"num_elements", num_segments * segment_size},
                           {"segment_size", segment_size},
                           {"flops",
                            {state.iterations() * 1.0 * num_elements,
                             benchmark::Counter::kAvgThreadsRate}}});

#if 0
    half *h_out = new half[num_elements];
    PRINT_IF_ERROR(hipMemcpy(h_out, d_out, num_elements * sizeof(half), hipMemcpyDeviceToHost));

    int errors = 0;
    float correct_sum = 0;
    for (int i = 0; i < num_elements; i++) {
      correct_sum += h_in[i];
      if (fabs(half_to_float((h_out[i])) - correct_sum) > 0.01) {
        errors++;
        printf("Expected %f, get h_out[%d] = %f\n", correct_sum, i, half_to_float(h_out[i]));
      }
    }
    if (errors > 0) {
      printf("THRUST_SEGMENTED2_PREFIXSUM does not agree with SEQUENTIAL! %d errors!\n", errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
    delete h_out;
#endif

    hipFree(d_in_fp16);
  } catch (...) {
    hipFree(d_in_fp16);
    hipDeviceReset();

    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}

BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_16_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_32_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_64_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_128_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_256_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_512_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_1024_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_2048_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_4096_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_8192_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_16384_ARGS()->UseManualTime();

static void THRUST_TUNE_SEGMENTED2_PREFIXSUM(benchmark::State &state) {
  THRUST_SEGMENTED2_PREFIXSUM(state);
}

#define RUN_THRUST_TUNE(TUNE_ARGS)                                                                 \
  BENCHMARK(THRUST_TUNE_SEGMENTED2_PREFIXSUM)->Apply(TUNE_ARGS)->UseManualTime();

// RUN_THRUST_TUNE(Tuning16_x_14);
// RUN_THRUST_TUNE(Tuning16_x_18);
RUN_THRUST_TUNE(Tuning16_x_22);
// RUN_THRUST_TUNE(Tuning16_x_26);
RUN_THRUST_TUNE(Tuning16_x_30);
9359916fbb45a17d347be8669ea6e3b91d1fc5e4.cu
#include <benchmark/benchmark.h>

#include "init/init.hpp"
#include "prefixsum/args.hpp"
#include "utils/utils.hpp"

#include <thrust/execution_policy.h>
#include <thrust/scan.h>

static void THRUST_SEGMENTED2_PREFIXSUM(benchmark::State &state) {
  const size_t num_segments = state.range(0);
  const size_t segment_size = state.range(1);
  const size_t num_elements = num_segments * segment_size;

  cudaEvent_t start, stop;
  half *d_in_fp16 = nullptr;
  half *d_out     = nullptr;

  try {
    PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
    PRINT_IF_ERROR(cudaMalloc(&d_out, num_elements * sizeof(half)));

    cuda_memory_set(d_in_fp16, 0.001f, num_elements);

    PRINT_IF_ERROR(cudaDeviceSynchronize());

    PRINT_IF_ERROR(cudaEventCreate(&start));
    PRINT_IF_ERROR(cudaEventCreate(&stop));
    defer(cudaEventDestroy(start));
    defer(cudaEventDestroy(stop));

    for (auto _ : state) {
      PRINT_IF_ERROR(cudaEventRecord(start));

      for (size_t ii = 0; ii < num_segments; ii++) {
        thrust::inclusive_scan(thrust::device,
                               d_in_fp16 + ii * segment_size,
                               d_in_fp16 + (ii + 1) * segment_size,
                               d_out + ii * segment_size);
      }

      PRINT_IF_ERROR(cudaEventRecord(stop));
      PRINT_IF_ERROR(cudaEventSynchronize(stop));

      state.PauseTiming();

      float msecTotal = 0.0f;
      PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);

      state.ResumeTiming();
    }

    state.counters.insert({{"num_segments", num_segments},
                           {"num_elements", num_segments * segment_size},
                           {"segment_size", segment_size},
                           {"flops",
                            {state.iterations() * 1.0 * num_elements,
                             benchmark::Counter::kAvgThreadsRate}}});

#if 0
    half *h_out = new half[num_elements];
    PRINT_IF_ERROR(cudaMemcpy(h_out, d_out, num_elements * sizeof(half), cudaMemcpyDeviceToHost));

    int errors = 0;
    float correct_sum = 0;
    for (int i = 0; i < num_elements; i++) {
      correct_sum += h_in[i];
      if (fabs(half_to_float((h_out[i])) - correct_sum) > 0.01) {
        errors++;
        printf("Expected %f, get h_out[%d] = %f\n", correct_sum, i, half_to_float(h_out[i]));
      }
    }
    if (errors > 0) {
      printf("THRUST_SEGMENTED2_PREFIXSUM does not agree with SEQUENTIAL! %d errors!\n", errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
    delete h_out;
#endif

    cudaFree(d_in_fp16);
  } catch (...) {
    cudaFree(d_in_fp16);
    cudaDeviceReset();

    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}

BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_16_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_32_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_64_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_128_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_256_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_512_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_1024_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_2048_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_4096_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_8192_ARGS()->UseManualTime();
BENCHMARK(THRUST_SEGMENTED2_PREFIXSUM)->SEG_16384_ARGS()->UseManualTime();

static void THRUST_TUNE_SEGMENTED2_PREFIXSUM(benchmark::State &state) {
  THRUST_SEGMENTED2_PREFIXSUM(state);
}

#define RUN_THRUST_TUNE(TUNE_ARGS)                                                                 \
  BENCHMARK(THRUST_TUNE_SEGMENTED2_PREFIXSUM)->Apply(TUNE_ARGS)->UseManualTime();

// RUN_THRUST_TUNE(Tuning16_x_14);
// RUN_THRUST_TUNE(Tuning16_x_18);
RUN_THRUST_TUNE(Tuning16_x_22);
// RUN_THRUST_TUNE(Tuning16_x_26);
RUN_THRUST_TUNE(Tuning16_x_30);
b710157db9a5c239289138ed099064707d2b15b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #include <cub/detail/cpp_compatibility.cuh> #include <cub/iterator/cache_modified_output_iterator.cuh> #include <cub/warp/warp_store.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include "fill_striped.cuh" // Has to go after all cub headers. Otherwise, this test won't catch unused // variables in cub kernels. 
#include "catch2_test_helper.h" template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, int TOTAL_WARPS, typename T, typename OutputIteratorT, typename ActionT> __global__ void warp_store_kernel(OutputIteratorT output_iterator, ActionT action) { using warp_store_t = cub::WarpStore<T, ITEMS_PER_THREAD, StoreAlgorithm, LOGICAL_WARP_THREADS>; using storage_t = typename warp_store_t::TempStorage; constexpr int tile_size = ITEMS_PER_THREAD * LOGICAL_WARP_THREADS; __shared__ storage_t storage[TOTAL_WARPS]; const int tid = cub::RowMajorTid(blockDim.x, blockDim.y, blockDim.z); T reg[ITEMS_PER_THREAD]; for (int item = 0; item < ITEMS_PER_THREAD; item++) { reg[item] = static_cast<T>(tid * ITEMS_PER_THREAD + item); } const int warp_id = tid / LOGICAL_WARP_THREADS; warp_store_t store(storage[warp_id]); action(store, output_iterator + (warp_id * tile_size), reg); } template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, int TOTAL_WARPS, typename T, typename OutputIteratorT, typename ActionT> void warp_store(OutputIteratorT output_iterator, ActionT action) { hipLaunchKernelGGL(( warp_store_kernel<StoreAlgorithm, LOGICAL_WARP_THREADS, ITEMS_PER_THREAD, TOTAL_WARPS, T, OutputIteratorT, ActionT>), dim3(1), dim3(TOTAL_WARPS * LOGICAL_WARP_THREADS), 0, 0, output_iterator, action); } struct guarded_store_t { int valid_items; template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, typename T, typename OutputIteratorT> __device__ void operator()(cub::WarpStore<T, ITEMS_PER_THREAD, StoreAlgorithm, LOGICAL_WARP_THREADS> store, OutputIteratorT output, T (&reg)[ITEMS_PER_THREAD]) { store.Store(output, reg, valid_items); } }; struct unguarded_store_t { template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, typename T, typename OutputIteratorT> __device__ void operator()(cub::WarpStore<T, ITEMS_PER_THREAD, StoreAlgorithm, LOGICAL_WARP_THREADS> store, OutputIteratorT output, T (&reg)[ITEMS_PER_THREAD]) { store.Store(output, reg); } }; template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, int TOTAL_WARPS, typename T> thrust::device_vector<T> compute_reference(int valid_items) { const int tile_size = LOGICAL_WARP_THREADS * ITEMS_PER_THREAD; const int total_item_count = TOTAL_WARPS * tile_size; thrust::device_vector<T> d_input(total_item_count); CUB_IF_CONSTEXPR(StoreAlgorithm == cub::WarpStoreAlgorithm::WARP_STORE_STRIPED) { thrust::host_vector<T> input(total_item_count); fill_striped<ITEMS_PER_THREAD, LOGICAL_WARP_THREADS, ITEMS_PER_THREAD * TOTAL_WARPS>( input.begin()); d_input = input; } else { c2h::gen(c2h::modulo_t{d_input.size()}, d_input); } if (valid_items != total_item_count) { for (int warp_id = 0; warp_id < TOTAL_WARPS; warp_id++) { thrust::fill(d_input.begin() + warp_id * tile_size + valid_items, d_input.begin() + (warp_id + 1) * tile_size, T{}); } } return d_input; } // %PARAM% LWT lwt 4:16:32 // %PARAM% ALGO_TYPE alg 0:1:2:3 using types = c2h::type_list<std::uint8_t, std::uint16_t, std::int32_t, std::int64_t>; using items_per_thread = c2h::enum_type_list<int, 1, 4, 7>; using logical_warp_threads = c2h::enum_type_list<int, LWT>; using algorithms = c2h::enum_type_list<cub::WarpStoreAlgorithm, cub::WarpStoreAlgorithm::WARP_STORE_DIRECT, cub::WarpStoreAlgorithm::WARP_STORE_STRIPED, cub::WarpStoreAlgorithm::WARP_STORE_TRANSPOSE, cub::WarpStoreAlgorithm::WARP_STORE_VECTORIZE>; using algorithm = 
c2h::enum_type_list<cub::WarpStoreAlgorithm, c2h::get<ALGO_TYPE, algorithms>::value>; using cache_store_modifier = c2h::enum_type_list<cub::CacheStoreModifier, cub::CacheStoreModifier::STORE_DEFAULT, cub::CacheStoreModifier::STORE_WB, cub::CacheStoreModifier::STORE_CG, cub::CacheStoreModifier::STORE_CS, cub::CacheStoreModifier::STORE_WT, cub::CacheStoreModifier::STORE_VOLATILE>; constexpr int guarded_store_tests_count = 30; template <int logical_warp_threads> struct total_warps_t { private: static constexpr int max_warps = 2; static constexpr bool is_arch_warp = (logical_warp_threads == CUB_WARP_THREADS(0)); static constexpr bool is_pow_of_two = ((logical_warp_threads & (logical_warp_threads - 1)) == 0); static constexpr int total_warps = (is_arch_warp || is_pow_of_two) ? max_warps : 1; public: static constexpr int value() { return total_warps; } }; template <class TestType> struct params_t { using type = typename c2h::get<0, TestType>; static constexpr int logical_warp_threads = c2h::get<1, TestType>::value; static constexpr int items_per_thread = c2h::get<2, TestType>::value; static constexpr cub::WarpStoreAlgorithm algorithm = c2h::get<3, TestType>::value; static constexpr int total_warps = total_warps_t<logical_warp_threads>::value(); static constexpr int tile_size = logical_warp_threads * items_per_thread; static constexpr int total_item_count = total_warps * tile_size; }; CUB_TEST("Warp store guarded range works with pointer", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm) { using params = params_t<TestType>; using type = typename params::type; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = GENERATE_COPY(take(guarded_store_tests_count, random(0, params::tile_size - 1))); auto out = thrust::raw_pointer_cast(d_out.data()); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, guarded_store_t{valid_items}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); } CUB_TEST("Warp store guarded range works with cache modified iterator", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm, cache_store_modifier) { using params = params_t<TestType>; using type = typename params::type; constexpr cub::CacheStoreModifier store_modifier = c2h::get<4, TestType>::value; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = GENERATE_COPY(take(guarded_store_tests_count, random(0, params::tile_size - 1))); auto out = cub::CacheModifiedOutputIterator<store_modifier, type>(thrust::raw_pointer_cast(d_out.data())); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, guarded_store_t{valid_items}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); } CUB_TEST("Warp store unguarded range works with pointer", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm) { using params = params_t<TestType>; using type = typename params::type; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = params::tile_size; auto out = thrust::raw_pointer_cast(d_out.data()); warp_store<params::algorithm, 
params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, unguarded_store_t{}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); } CUB_TEST("Warp store unguarded range works with cache modified iterator", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm, cache_store_modifier) { using params = params_t<TestType>; using type = typename params::type; constexpr cub::CacheStoreModifier store_modifier = c2h::get<4, TestType>::value; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = params::tile_size; auto out = cub::CacheModifiedOutputIterator<store_modifier, type>(thrust::raw_pointer_cast(d_out.data())); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, unguarded_store_t{}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); }
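Editor's note: the .hip test above differs from its .cu counterpart (next entry) essentially only in the kernel launch, since hipify rewrites CUDA's triple-chevron syntax into hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...). A minimal, self-contained sketch of that mapping follows; the scale kernel and launch_scale helper are hypothetical and belong to neither test file.

#include <hip/hip_runtime.h>

// Hypothetical kernel used only to illustrate the launch-syntax translation.
__global__ void scale(float* data, float factor, int n)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
  {
    data[i] *= factor;
  }
}

void launch_scale(float* d_data, float factor, int n)
{
  const int block = 256;
  const int grid  = (n + block - 1) / block;
  // CUDA form:            scale<<<grid, block, 0, 0>>>(d_data, factor, n);
  // HIP form from hipify (shared-memory bytes and stream become explicit args):
  hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_data, factor, n);
}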
b710157db9a5c239289138ed099064707d2b15b5.cu
/****************************************************************************** * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #include <cub/detail/cpp_compatibility.cuh> #include <cub/iterator/cache_modified_output_iterator.cuh> #include <cub/warp/warp_store.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include "fill_striped.cuh" // Has to go after all cub headers. Otherwise, this test won't catch unused // variables in cub kernels. 
#include "catch2_test_helper.h" template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, int TOTAL_WARPS, typename T, typename OutputIteratorT, typename ActionT> __global__ void warp_store_kernel(OutputIteratorT output_iterator, ActionT action) { using warp_store_t = cub::WarpStore<T, ITEMS_PER_THREAD, StoreAlgorithm, LOGICAL_WARP_THREADS>; using storage_t = typename warp_store_t::TempStorage; constexpr int tile_size = ITEMS_PER_THREAD * LOGICAL_WARP_THREADS; __shared__ storage_t storage[TOTAL_WARPS]; const int tid = cub::RowMajorTid(blockDim.x, blockDim.y, blockDim.z); T reg[ITEMS_PER_THREAD]; for (int item = 0; item < ITEMS_PER_THREAD; item++) { reg[item] = static_cast<T>(tid * ITEMS_PER_THREAD + item); } const int warp_id = tid / LOGICAL_WARP_THREADS; warp_store_t store(storage[warp_id]); action(store, output_iterator + (warp_id * tile_size), reg); } template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, int TOTAL_WARPS, typename T, typename OutputIteratorT, typename ActionT> void warp_store(OutputIteratorT output_iterator, ActionT action) { warp_store_kernel<StoreAlgorithm, LOGICAL_WARP_THREADS, ITEMS_PER_THREAD, TOTAL_WARPS, T, OutputIteratorT, ActionT><<<1, TOTAL_WARPS * LOGICAL_WARP_THREADS>>>(output_iterator, action); } struct guarded_store_t { int valid_items; template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, typename T, typename OutputIteratorT> __device__ void operator()(cub::WarpStore<T, ITEMS_PER_THREAD, StoreAlgorithm, LOGICAL_WARP_THREADS> store, OutputIteratorT output, T (&reg)[ITEMS_PER_THREAD]) { store.Store(output, reg, valid_items); } }; struct unguarded_store_t { template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, typename T, typename OutputIteratorT> __device__ void operator()(cub::WarpStore<T, ITEMS_PER_THREAD, StoreAlgorithm, LOGICAL_WARP_THREADS> store, OutputIteratorT output, T (&reg)[ITEMS_PER_THREAD]) { store.Store(output, reg); } }; template <cub::WarpStoreAlgorithm StoreAlgorithm, int LOGICAL_WARP_THREADS, int ITEMS_PER_THREAD, int TOTAL_WARPS, typename T> thrust::device_vector<T> compute_reference(int valid_items) { const int tile_size = LOGICAL_WARP_THREADS * ITEMS_PER_THREAD; const int total_item_count = TOTAL_WARPS * tile_size; thrust::device_vector<T> d_input(total_item_count); CUB_IF_CONSTEXPR(StoreAlgorithm == cub::WarpStoreAlgorithm::WARP_STORE_STRIPED) { thrust::host_vector<T> input(total_item_count); fill_striped<ITEMS_PER_THREAD, LOGICAL_WARP_THREADS, ITEMS_PER_THREAD * TOTAL_WARPS>( input.begin()); d_input = input; } else { c2h::gen(c2h::modulo_t{d_input.size()}, d_input); } if (valid_items != total_item_count) { for (int warp_id = 0; warp_id < TOTAL_WARPS; warp_id++) { thrust::fill(d_input.begin() + warp_id * tile_size + valid_items, d_input.begin() + (warp_id + 1) * tile_size, T{}); } } return d_input; } // %PARAM% LWT lwt 4:16:32 // %PARAM% ALGO_TYPE alg 0:1:2:3 using types = c2h::type_list<std::uint8_t, std::uint16_t, std::int32_t, std::int64_t>; using items_per_thread = c2h::enum_type_list<int, 1, 4, 7>; using logical_warp_threads = c2h::enum_type_list<int, LWT>; using algorithms = c2h::enum_type_list<cub::WarpStoreAlgorithm, cub::WarpStoreAlgorithm::WARP_STORE_DIRECT, cub::WarpStoreAlgorithm::WARP_STORE_STRIPED, cub::WarpStoreAlgorithm::WARP_STORE_TRANSPOSE, cub::WarpStoreAlgorithm::WARP_STORE_VECTORIZE>; using algorithm = c2h::enum_type_list<cub::WarpStoreAlgorithm, 
c2h::get<ALGO_TYPE, algorithms>::value>; using cache_store_modifier = c2h::enum_type_list<cub::CacheStoreModifier, cub::CacheStoreModifier::STORE_DEFAULT, cub::CacheStoreModifier::STORE_WB, cub::CacheStoreModifier::STORE_CG, cub::CacheStoreModifier::STORE_CS, cub::CacheStoreModifier::STORE_WT, cub::CacheStoreModifier::STORE_VOLATILE>; constexpr int guarded_store_tests_count = 30; template <int logical_warp_threads> struct total_warps_t { private: static constexpr int max_warps = 2; static constexpr bool is_arch_warp = (logical_warp_threads == CUB_WARP_THREADS(0)); static constexpr bool is_pow_of_two = ((logical_warp_threads & (logical_warp_threads - 1)) == 0); static constexpr int total_warps = (is_arch_warp || is_pow_of_two) ? max_warps : 1; public: static constexpr int value() { return total_warps; } }; template <class TestType> struct params_t { using type = typename c2h::get<0, TestType>; static constexpr int logical_warp_threads = c2h::get<1, TestType>::value; static constexpr int items_per_thread = c2h::get<2, TestType>::value; static constexpr cub::WarpStoreAlgorithm algorithm = c2h::get<3, TestType>::value; static constexpr int total_warps = total_warps_t<logical_warp_threads>::value(); static constexpr int tile_size = logical_warp_threads * items_per_thread; static constexpr int total_item_count = total_warps * tile_size; }; CUB_TEST("Warp store guarded range works with pointer", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm) { using params = params_t<TestType>; using type = typename params::type; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = GENERATE_COPY(take(guarded_store_tests_count, random(0, params::tile_size - 1))); auto out = thrust::raw_pointer_cast(d_out.data()); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, guarded_store_t{valid_items}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); } CUB_TEST("Warp store guarded range works with cache modified iterator", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm, cache_store_modifier) { using params = params_t<TestType>; using type = typename params::type; constexpr cub::CacheStoreModifier store_modifier = c2h::get<4, TestType>::value; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = GENERATE_COPY(take(guarded_store_tests_count, random(0, params::tile_size - 1))); auto out = cub::CacheModifiedOutputIterator<store_modifier, type>(thrust::raw_pointer_cast(d_out.data())); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, guarded_store_t{valid_items}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); } CUB_TEST("Warp store unguarded range works with pointer", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm) { using params = params_t<TestType>; using type = typename params::type; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = params::tile_size; auto out = thrust::raw_pointer_cast(d_out.data()); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, 
params::total_warps, type>(out, unguarded_store_t{}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); } CUB_TEST("Warp store unguarded range works with cache modified iterator", "[store][warp]", types, logical_warp_threads, items_per_thread, algorithm, cache_store_modifier) { using params = params_t<TestType>; using type = typename params::type; constexpr cub::CacheStoreModifier store_modifier = c2h::get<4, TestType>::value; thrust::device_vector<type> d_out(params::total_item_count, type{}); const int valid_items = params::tile_size; auto out = cub::CacheModifiedOutputIterator<store_modifier, type>(thrust::raw_pointer_cast(d_out.data())); warp_store<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(out, unguarded_store_t{}); auto d_expected_output = compute_reference<params::algorithm, params::logical_warp_threads, params::items_per_thread, params::total_warps, type>(valid_items); REQUIRE(d_expected_output == d_out); }
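Editor's note: both test files above exercise the cub::WarpStore collective. A minimal sketch of that pattern is shown below, assuming a single 32-thread block; the kernel name, item count, and algorithm choice are illustrative and not taken from the test sources.

#include <cub/warp/warp_store.cuh>

// Illustrative kernel (not part of the test file): one 32-thread logical warp
// cooperatively stores 4 items per thread using the TRANSPOSE algorithm.
// Launch with a single block of 32 threads.
__global__ void warp_store_demo(int* d_out)
{
  constexpr int warp_threads     = 32;
  constexpr int items_per_thread = 4;
  using warp_store_t =
    cub::WarpStore<int,
                   items_per_thread,
                   cub::WarpStoreAlgorithm::WARP_STORE_TRANSPOSE,
                   warp_threads>;

  __shared__ typename warp_store_t::TempStorage temp_storage;

  // Blocked arrangement in registers: thread i owns items [4*i, 4*i + 3].
  int items[items_per_thread];
  for (int i = 0; i < items_per_thread; ++i)
  {
    items[i] = static_cast<int>(threadIdx.x) * items_per_thread + i;
  }

  // The warp writes its tile to d_out as one contiguous, ordered range.
  warp_store_t(temp_storage).Store(d_out, items);
}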
fcc7fef470a95938aff5c771e5ca732bf99e2049.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2012 Ben Barsdell * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* This file contains the boring boiler-plate code to manage the library. TODO: Test on 32-bit integer input Consider accepting 32-bit floats instead of 32-bit ints */ //#define DEDISP_DEBUG //#define DEDISP_BENCHMARK #include <dedisp.h> #include <vector> #include <algorithm> // For std::fill #include <thrust/host_vector.h> #include <thrust/device_vector.h> // For copying and scrunching the DM list #include <thrust/transform.h> #include <thrust/iterator/constant_iterator.h> #ifdef DEDISP_BENCHMARK #include <fstream> #endif #if defined(DEDISP_DEBUG) && DEDISP_DEBUG #include <stdio.h> // For printf #endif // TODO: Remove these when done benchmarking // ----------------------------------------- #if defined(DEDISP_BENCHMARK) #include <iostream> using std::cout; using std::endl; #include "stopwatch.hpp" #endif // ----------------------------------------- #include "gpu_memory.hpp" #include "transpose.hpp" #define DEDISP_DEFAULT_GULP_SIZE 65536 //131072 // Note: The implementation of the sub-band algorithm is a prototype only // Enable at your own risk! It may not be in a working state at all. //#define USE_SUBBAND_ALGORITHM #define DEDISP_DEFAULT_SUBBAND_SIZE 32 // TODO: Make sure this doesn't limit GPU constant memory // available to users. #define DEDISP_MAX_NCHANS 8192 // Internal word type used for transpose and dedispersion kernel typedef unsigned int dedisp_word; // Note: This must be included after the above #define and typedef #include "kernels_hip.cuh" // Define plan structure struct dedisp_plan_struct { // Size parameters dedisp_size dm_count; dedisp_size nchans; dedisp_size max_delay; dedisp_size gulp_size; // Physical parameters dedisp_float dt; dedisp_float f0; dedisp_float df; // Host arrays std::vector<dedisp_float> dm_list; // size = dm_count std::vector<dedisp_float> delay_table; // size = nchans std::vector<dedisp_bool> killmask; // size = nchans std::vector<dedisp_size> scrunch_list; // size = dm_count // Device arrays thrust::device_vector<dedisp_float> d_dm_list; thrust::device_vector<dedisp_float> d_delay_table; thrust::device_vector<dedisp_bool> d_killmask; thrust::device_vector<dedisp_size> d_scrunch_list; //StreamType stream; // Scrunching parameters dedisp_bool scrunching_enabled; dedisp_float pulse_width; dedisp_float scrunch_tol; }; // Private helper functions // ------------------------ template<typename T> T min(T a, T b) { return a<b ? 
a : b; } unsigned long div_round_up(unsigned long a, unsigned long b) { return (a-1) / b + 1; } // Internal abstraction for errors #if defined(DEDISP_DEBUG) && DEDISP_DEBUG #define throw_error(error) do { \ printf("An error occurred within dedisp on line %d of %s: %s", \ __LINE__, __FILE__, dedisp_get_error_string(error)); \ return (error); } while(0) #define throw_getter_error(error, retval) do { \ printf("An error occurred within dedisp on line %d of %s: %s", \ __LINE__, __FILE__, dedisp_get_error_string(error)); \ return (retval); } while(0) #else #define throw_error(error) return error #define throw_getter_error(error, retval) return retval #endif // DEDISP_DEBUG /* dedisp_error throw_error(dedisp_error error) { // Note: Could, e.g., put an error callback in here return error; } */ dedisp_error update_scrunch_list(dedisp_plan plan) { if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } if( !plan->scrunching_enabled || 0 == plan->dm_count ) { plan->scrunch_list.resize(0); // Fill with 1's by default for safety plan->scrunch_list.resize(plan->dm_count, dedisp_size(1)); return DEDISP_NO_ERROR; } plan->scrunch_list.resize(plan->dm_count); dedisp_error error = generate_scrunch_list(&plan->scrunch_list[0], plan->dm_count, plan->dt, &plan->dm_list[0], plan->nchans, plan->f0, plan->df, plan->pulse_width, plan->scrunch_tol); if( error != DEDISP_NO_ERROR ) { return error; } // Allocate on and copy to the device try { plan->d_scrunch_list.resize(plan->dm_count); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_scrunch_list = plan->scrunch_list; } catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); } return DEDISP_NO_ERROR; } // ------------------------ // Public functions // ---------------- dedisp_error dedisp_create_plan(dedisp_plan* plan_, dedisp_size nchans, dedisp_float dt, dedisp_float f0, dedisp_float df) { // Initialise to NULL for safety *plan_ = 0; if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } int device_idx; hipGetDevice(&device_idx); // Check for parameter errors if( nchans > DEDISP_MAX_NCHANS ) { throw_error(DEDISP_NCHANS_EXCEEDS_LIMIT); } // Force the df parameter to be negative such that // freq[chan] = f0 + chan * df. df = -abs(df); dedisp_plan plan = new dedisp_plan_struct(); if( !plan ) { throw_error(DEDISP_MEM_ALLOC_FAILED); } plan->dm_count = 0; plan->nchans = nchans; plan->gulp_size = DEDISP_DEFAULT_GULP_SIZE; plan->max_delay = 0; plan->dt = dt; plan->f0 = f0; plan->df = df; //plan->stream = 0; // Generate delay table and copy to device memory // Note: The DM factor is left out and applied during dedispersion plan->delay_table.resize(plan->nchans); generate_delay_table(&plan->delay_table[0], plan->nchans, dt, f0, df); try { plan->d_delay_table.resize(plan->nchans); } catch(...) { dedisp_destroy_plan(plan); throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_delay_table = plan->delay_table; } catch(...) { dedisp_destroy_plan(plan); throw_error(DEDISP_MEM_COPY_FAILED); } // Initialise the killmask plan->killmask.resize(plan->nchans, (dedisp_bool)true); try { plan->d_killmask.resize(plan->nchans); } catch(...) 
{ dedisp_destroy_plan(plan); throw_error(DEDISP_MEM_ALLOC_FAILED); } dedisp_error err = dedisp_set_killmask(plan, (dedisp_bool*)0); if( err != DEDISP_NO_ERROR ) { dedisp_destroy_plan(plan); throw_error(err); } *plan_ = plan; return DEDISP_NO_ERROR; } dedisp_error dedisp_set_gulp_size(dedisp_plan plan, dedisp_size gulp_size) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } plan->gulp_size = gulp_size; return DEDISP_NO_ERROR; } dedisp_size dedisp_get_gulp_size(dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->gulp_size; } dedisp_error dedisp_set_dm_list(dedisp_plan plan, const dedisp_float* dm_list, dedisp_size count) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( !dm_list ) { throw_error(DEDISP_INVALID_POINTER); } if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } plan->dm_count = count; plan->dm_list.assign(dm_list, dm_list+count); // Copy to the device try { plan->d_dm_list.resize(plan->dm_count); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_dm_list = plan->dm_list; } catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); } // Calculate the maximum delay and store it in the plan plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] * plan->delay_table[plan->nchans-1] + 0.5); dedisp_error error = update_scrunch_list(plan); if( error != DEDISP_NO_ERROR ) { throw_error(error); } return DEDISP_NO_ERROR; } dedisp_error dedisp_generate_dm_list(dedisp_plan plan, dedisp_float dm_start, dedisp_float dm_end, dedisp_float ti, dedisp_float tol) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } // Generate the DM list (on the host) plan->dm_list.clear(); generate_dm_list(plan->dm_list, dm_start, dm_end, plan->dt, ti, plan->f0, plan->df, plan->nchans, tol); plan->dm_count = plan->dm_list.size(); // Allocate device memory for the DM list try { plan->d_dm_list.resize(plan->dm_count); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_dm_list = plan->dm_list; } catch(...) 
{ throw_error(DEDISP_MEM_COPY_FAILED); } // Calculate the maximum delay and store it in the plan plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] * plan->delay_table[plan->nchans-1] + 0.5); dedisp_error error = update_scrunch_list(plan); if( error != DEDISP_NO_ERROR ) { throw_error(error); } return DEDISP_NO_ERROR; } dedisp_float * dedisp_generate_dm_list_guru (dedisp_float dm_start, dedisp_float dm_end, double dt, double ti, double f0, double df, dedisp_size nchans, double tol, dedisp_size * dm_count) { std::vector<dedisp_float> dm_table; generate_dm_list(dm_table, dm_start, dm_end, dt, ti, f0, df, nchans, tol); *dm_count = dm_table.size(); return &dm_table[0]; } dedisp_error dedisp_set_device(int device_idx) { if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } hipError_t error = hipSetDevice(device_idx); // Note: hipErrorInvalidValue isn't a documented return value, but // it still gets returned :/ if( hipErrorInvalidDevice == error || hipErrorInvalidValue == error ) throw_error(DEDISP_INVALID_DEVICE_INDEX); else if( hipErrorSetOnActiveProcess == error ) throw_error(DEDISP_DEVICE_ALREADY_SET); else if( hipSuccess != error ) throw_error(DEDISP_UNKNOWN_ERROR); else return DEDISP_NO_ERROR; } dedisp_error dedisp_set_killmask(dedisp_plan plan, const dedisp_bool* killmask) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } if( 0 != killmask ) { // Copy killmask to plan (both host and device) plan->killmask.assign(killmask, killmask + plan->nchans); try { plan->d_killmask = plan->killmask; } catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); } } else { // Set the killmask to all true std::fill(plan->killmask.begin(), plan->killmask.end(), (dedisp_bool)true); thrust::fill(plan->d_killmask.begin(), plan->d_killmask.end(), (dedisp_bool)true); } return DEDISP_NO_ERROR; } /* dedisp_plan dedisp_set_stream(dedisp_plan plan, StreamType stream) { plan->stream = stream; return plan; } */ // Getters // ------- dedisp_size dedisp_get_max_delay(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } return plan->max_delay; } dedisp_size dedisp_get_dm_delay(const dedisp_plan plan, int dm_trial) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } if (dm_trial < 0 || dm_trial >= plan->dm_count ) { throw_getter_error(DEDISP_UNKNOWN_ERROR,0); } return (plan->dm_list[dm_trial] * plan->delay_table[plan->nchans-1] + 0.5); } dedisp_size dedisp_get_channel_count(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->nchans; } dedisp_size dedisp_get_dm_count(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->dm_count; } const dedisp_float* dedisp_get_dm_list(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } return &plan->dm_list[0]; } const dedisp_bool* dedisp_get_killmask(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return &plan->killmask[0]; } dedisp_float dedisp_get_dt(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->dt; } dedisp_float dedisp_get_f0(const dedisp_plan plan) { if( !plan ) { 
throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->f0; } dedisp_float dedisp_get_df(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->df; } // Warning: Big mother function dedisp_error dedisp_execute_guru(const dedisp_plan plan, dedisp_size nsamps, const dedisp_byte* in, dedisp_size in_nbits, dedisp_size in_stride, dedisp_byte* out, dedisp_size out_nbits, dedisp_size out_stride, dedisp_size first_dm_idx, dedisp_size dm_count, unsigned flags) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( hipGetLastError() != hipSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } enum { BITS_PER_BYTE = 8, BYTES_PER_WORD = sizeof(dedisp_word) / sizeof(dedisp_byte) }; dedisp_size out_bytes_per_sample = out_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE); if( 0 == in || 0 == out ) { throw_error(DEDISP_INVALID_POINTER); } // Note: Must be careful with integer division if( in_stride < plan->nchans*in_nbits/(sizeof(dedisp_byte)*BITS_PER_BYTE) || out_stride < (nsamps - plan->max_delay)*out_bytes_per_sample ) { throw_error(DEDISP_INVALID_STRIDE); } if( 0 == plan->dm_count ) { throw_error(DEDISP_NO_DM_LIST_SET); } if( nsamps < plan->max_delay ) { throw_error(DEDISP_TOO_FEW_NSAMPS); } // Check for valid synchronisation flags if( flags & DEDISP_ASYNC && flags & DEDISP_WAIT ) { throw_error(DEDISP_INVALID_FLAG_COMBINATION); } // Check for valid nbits values if( in_nbits != 1 && in_nbits != 2 && in_nbits != 4 && in_nbits != 8 && in_nbits != 16 && in_nbits != 32 ) { throw_error(DEDISP_UNSUPPORTED_IN_NBITS); } if( out_nbits != 8 && out_nbits != 16 && out_nbits != 32 ) { throw_error(DEDISP_UNSUPPORTED_OUT_NBITS); } bool using_host_memory; if( flags & DEDISP_HOST_POINTERS && flags & DEDISP_DEVICE_POINTERS ) { throw_error(DEDISP_INVALID_FLAG_COMBINATION); } else { using_host_memory = !(flags & DEDISP_DEVICE_POINTERS); } // Copy the lookup tables to constant memory on the device // TODO: This was much tidier, but thanks to CUDA's insistence on // breaking its API in v5.0 I had to mess it up like this. hipMemcpyToSymbolAsync(c_delay_table, thrust::raw_pointer_cast(&plan->d_delay_table[0]), plan->nchans * sizeof(dedisp_float), 0, hipMemcpyDeviceToDevice, 0); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if( error != hipSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } hipMemcpyToSymbolAsync(c_killmask, thrust::raw_pointer_cast(&plan->d_killmask[0]), plan->nchans * sizeof(dedisp_bool), 0, hipMemcpyDeviceToDevice, 0); hipDeviceSynchronize(); error = hipGetLastError(); if( error != hipSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } // Compute the problem decomposition dedisp_size nsamps_computed = nsamps - plan->max_delay; // Specify the maximum gulp size dedisp_size nsamps_computed_gulp_max; if( using_host_memory ) { nsamps_computed_gulp_max = min(plan->gulp_size, nsamps_computed); } else { // Just do it in one gulp if given device pointers nsamps_computed_gulp_max = nsamps_computed; } // Just to be sure // TODO: This seems quite wrong. Why was it here? /* if( nsamps_computed_gulp_max < plan->max_delay ) { throw_error(DEDISP_TOO_FEW_NSAMPS); } */ // Compute derived counts for maximum gulp size [dedisp_word == 4 bytes] dedisp_size nsamps_gulp_max = nsamps_computed_gulp_max + plan->max_delay; dedisp_size chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / in_nbits; dedisp_size nchan_words = plan->nchans / chans_per_word; // We use words for processing but allow arbitrary byte strides, which are // not necessarily friendly. 
bool friendly_in_stride = (0 == in_stride % BYTES_PER_WORD); // Note: If desired, this could be rounded up, e.g., to a power of 2 dedisp_size in_buf_stride_words = nchan_words; dedisp_size in_count_gulp_max = nsamps_gulp_max * in_buf_stride_words; dedisp_size nsamps_padded_gulp_max = div_round_up(nsamps_computed_gulp_max, DEDISP_SAMPS_PER_THREAD) * DEDISP_SAMPS_PER_THREAD + plan->max_delay; dedisp_size in_count_padded_gulp_max = nsamps_padded_gulp_max * in_buf_stride_words; // TODO: Make this a parameter? dedisp_size min_in_nbits = 0; if( plan->scrunching_enabled ) { // TODO: This produces corrupt output when equal to 32 ! // Also check whether the unpacker is broken when in_nbits=32 ! min_in_nbits = 16; //32; } dedisp_size unpacked_in_nbits = max((int)in_nbits, (int)min_in_nbits); dedisp_size unpacked_chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / unpacked_in_nbits; dedisp_size unpacked_nchan_words = plan->nchans / unpacked_chans_per_word; dedisp_size unpacked_buf_stride_words = unpacked_nchan_words; dedisp_size unpacked_count_padded_gulp_max = nsamps_padded_gulp_max * unpacked_buf_stride_words; dedisp_size out_stride_gulp_samples = nsamps_computed_gulp_max; dedisp_size out_stride_gulp_bytes = out_stride_gulp_samples * out_bytes_per_sample; dedisp_size out_count_gulp_max = out_stride_gulp_bytes * dm_count; // Organise device memory pointers // ------------------------------- const dedisp_word* d_in = 0; dedisp_word* d_transposed = 0; dedisp_word* d_unpacked = 0; dedisp_byte* d_out = 0; thrust::device_vector<dedisp_word> d_in_buf; thrust::device_vector<dedisp_word> d_transposed_buf; thrust::device_vector<dedisp_word> d_unpacked_buf; thrust::device_vector<dedisp_byte> d_out_buf; // Allocate temporary buffers on the device where necessary if( using_host_memory || !friendly_in_stride ) { try { d_in_buf.resize(in_count_gulp_max); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_in = thrust::raw_pointer_cast(&d_in_buf[0]); } else { d_in = (dedisp_word*)in; } if( using_host_memory ) { try { d_out_buf.resize(out_count_gulp_max); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_out = thrust::raw_pointer_cast(&d_out_buf[0]); } else { d_out = out; } //// Note: * 2 here is for the time-scrunched copies of the data try { d_transposed_buf.resize(in_count_padded_gulp_max/* * 2 */); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_transposed = thrust::raw_pointer_cast(&d_transposed_buf[0]); // Note: * 2 here is for the time-scrunched copies of the data try { d_unpacked_buf.resize(unpacked_count_padded_gulp_max * 2); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_unpacked = thrust::raw_pointer_cast(&d_unpacked_buf[0]); // ------------------------------- // The stride (in words) between differently-scrunched copies of the // unpacked data. dedisp_size scrunch_stride = unpacked_count_padded_gulp_max; #ifdef USE_SUBBAND_ALGORITHM dedisp_size sb_size = DEDISP_DEFAULT_SUBBAND_SIZE; // Note: Setting these two parameters equal should balance the two steps of // the sub-band algorithm. dedisp_size dm_size = sb_size; // Ndm' dedisp_size sb_count = plan->nchans / sb_size; dedisp_size nom_dm_count = dm_count / dm_size; thrust::device_vector<dedisp_word> d_intermediate_buf; try { d_intermediate_buf.resize(nsamps_padded_gulp_max * sb_count * nom_dm_count); } catch(...) 
{ throw_error(DEDISP_MEM_ALLOC_FAILED); } dedisp_word* d_intermediate = thrust::raw_pointer_cast(&d_intermediate_buf[0]); #endif // USE_SUBBAND_ALGORITHM // TODO: Eventually re-implement streams hipStream_t stream = 0;//(hipStream_t)plan->stream; #ifdef DEDISP_BENCHMARK Stopwatch copy_to_timer; Stopwatch copy_from_timer; Stopwatch transpose_timer; Stopwatch kernel_timer; #endif // Gulp loop for( dedisp_size gulp_samp_idx=0; gulp_samp_idx<nsamps_computed; gulp_samp_idx+=nsamps_computed_gulp_max ) { dedisp_size nsamps_computed_gulp = min(nsamps_computed_gulp_max, nsamps_computed-gulp_samp_idx); dedisp_size nsamps_gulp = nsamps_computed_gulp + plan->max_delay; dedisp_size nsamps_padded_gulp = div_round_up(nsamps_computed_gulp, DEDISP_SAMPS_PER_THREAD) * DEDISP_SAMPS_PER_THREAD + plan->max_delay; #ifdef DEDISP_BENCHMARK copy_to_timer.start(); #endif // Copy the input data from host to device if necessary if( using_host_memory ) { // Allowing arbitrary byte strides means we must do a strided copy if( !copy_host_to_device_2d((dedisp_byte*)d_in, in_buf_stride_words * BYTES_PER_WORD, in + gulp_samp_idx*in_stride, in_stride, nchan_words * BYTES_PER_WORD, nsamps_gulp) ) { throw_error(DEDISP_MEM_COPY_FAILED); } } else if( !friendly_in_stride ) { // Device pointers with unfriendly stride if( !copy_device_to_device_2d((dedisp_byte*)d_in, in_buf_stride_words * BYTES_PER_WORD, in + gulp_samp_idx*in_stride, in_stride, nchan_words * BYTES_PER_WORD, nsamps_gulp) ) { throw_error(DEDISP_MEM_COPY_FAILED); } } #ifdef DEDISP_BENCHMARK hipDeviceSynchronize(); copy_to_timer.stop(); transpose_timer.start(); #endif // Transpose the words in the input Transpose<dedisp_word> transpose; transpose.transpose(d_in, nchan_words, nsamps_gulp, in_buf_stride_words, nsamps_padded_gulp, d_transposed); #ifdef DEDISP_BENCHMARK hipDeviceSynchronize(); transpose_timer.stop(); kernel_timer.start(); #endif // Unpack the transposed data unpack(d_transposed, nsamps_padded_gulp, nchan_words, d_unpacked, in_nbits, unpacked_in_nbits); // Compute time-scrunched copies of the data if( plan->scrunching_enabled ) { dedisp_size max_scrunch = plan->scrunch_list[plan->dm_count-1]; dedisp_size scrunch_in_offset = 0; dedisp_size scrunch_out_offset = scrunch_stride; for( dedisp_size s=2; s<=max_scrunch; s*=2 ) { // TODO: Need to pass in stride and count? I.e., nsamps_padded/computed_gulp //scrunch_x2(&d_transposed[scrunch_in_offset], // nsamps_padded_gulp/(s/2), nchan_words, in_nbits, // &d_transposed[scrunch_out_offset]); scrunch_x2(&d_unpacked[scrunch_in_offset], nsamps_padded_gulp/(s/2), unpacked_nchan_words, unpacked_in_nbits, &d_unpacked[scrunch_out_offset]); scrunch_in_offset = scrunch_out_offset; scrunch_out_offset += scrunch_stride / s; } } #ifdef USE_SUBBAND_ALGORITHM // TODO: This has not been updated to use d_unpacked! 
dedisp_size chan_stride = 1; dedisp_size dm_stride = dm_size; dedisp_size ostride = nsamps_padded_gulp * sb_count; dedisp_size batch_size = sb_count; dedisp_size batch_in_stride = nsamps_padded_gulp * sb_size / chans_per_word; dedisp_size batch_dm_stride = 0; dedisp_size batch_chan_stride = sb_size; dedisp_size batch_out_stride = nsamps_padded_gulp; /* // Consistency checks if( (nom_dm_count-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) { throw std::runtime_error("DM STRIDES ARE INCONSISTENT"); } if( (sb_size-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) { throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT"); } */ // Both steps if( !dedisperse(d_transposed, nsamps_padded_gulp, nsamps_computed_gulp, in_nbits, sb_size, chan_stride, thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]), nom_dm_count, dm_stride, (dedisp_byte*)d_intermediate, ostride, 32,//out_nbits, batch_size, batch_in_stride, batch_dm_stride, batch_chan_stride, batch_out_stride) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } batch_size = nom_dm_count; chan_stride = sb_size; dm_stride = 1; ostride = out_stride_gulp_samples; batch_in_stride = nsamps_padded_gulp * sb_count; batch_dm_stride = 0; batch_chan_stride = 0; batch_out_stride = out_stride_gulp_samples * dm_size; /* // Consistency checks if( (dm_size-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) { throw std::runtime_error("DM STRIDES ARE INCONSISTENT"); } if( (sb_count-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) { throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT"); } */ if( !dedisperse(d_intermediate, nsamps_padded_gulp, nsamps_computed_gulp, 32,//in_nbits, sb_count, chan_stride, thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]), dm_size, dm_stride, d_out, ostride, out_nbits, batch_size, batch_in_stride, batch_dm_stride, batch_chan_stride, batch_out_stride) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } #else // Use direct algorithm if( plan->scrunching_enabled ) { // TODO: THIS WILL NOT WORK IF dm_count < plan->dm_count ! // Need to avoid assumption that scrunch starts at 1 // Must start the scrunch at the first *requested* DM thrust::device_vector<dedisp_float> d_scrunched_dm_list(dm_count); dedisp_size scrunch_start = 0; dedisp_size scrunch_offset = 0; for( dedisp_size s=0; s<dm_count; ++s ) { dedisp_size cur_scrunch = plan->scrunch_list[s]; // Look for segment boundaries if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) { //dedisp_size next_scrunch = plan->scrunch_list[s]; //if( next_scrunch != cur_scrunch ) { dedisp_size scrunch_count = s+1 - scrunch_start; // Make a copy of the dm list divided by the scrunch factor // Note: This has the effect of increasing dt in the delay eqn dedisp_size dm_offset = first_dm_idx + scrunch_start; thrust::transform(plan->d_dm_list.begin() + dm_offset, plan->d_dm_list.begin() + dm_offset + scrunch_count, thrust::make_constant_iterator(cur_scrunch), d_scrunched_dm_list.begin(), thrust::divides<dedisp_float>()); dedisp_float* d_scrunched_dm_list_ptr = thrust::raw_pointer_cast(&d_scrunched_dm_list[0]); // TODO: Is this how the nsamps vars need to change? 
if( !dedisperse(//&d_transposed[scrunch_offset], &d_unpacked[scrunch_offset], nsamps_padded_gulp / cur_scrunch, nsamps_computed_gulp / cur_scrunch, unpacked_in_nbits, //in_nbits, plan->nchans, 1, d_scrunched_dm_list_ptr, scrunch_count, // dm_count 1, d_out + scrunch_start*out_stride_gulp_bytes, out_stride_gulp_samples, out_nbits, 1, 0, 0, 0, 0) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } scrunch_offset += scrunch_stride / cur_scrunch; scrunch_start += scrunch_count; } } } else { // Perform direct dedispersion without scrunching if( !dedisperse(//d_transposed, d_unpacked, nsamps_padded_gulp, nsamps_computed_gulp, unpacked_in_nbits, //in_nbits, plan->nchans, 1, thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]), dm_count, 1, d_out, out_stride_gulp_samples, out_nbits, 1, 0, 0, 0, 0) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } } #endif // SB/direct algorithm #ifdef DEDISP_BENCHMARK hipDeviceSynchronize(); kernel_timer.stop(); #endif // Copy output back to host memory if necessary if( using_host_memory ) { dedisp_size gulp_samp_byte_idx = gulp_samp_idx * out_bytes_per_sample; dedisp_size nsamp_bytes_computed_gulp = nsamps_computed_gulp * out_bytes_per_sample; #ifdef DEDISP_BENCHMARK copy_from_timer.start(); #endif if( plan->scrunching_enabled ) { // TODO: This for-loop isn't a very elegant solution dedisp_size scrunch_start = 0; for( dedisp_size s=0; s<dm_count; ++s ) { dedisp_size cur_scrunch = plan->scrunch_list[s]; // Look for segment boundaries if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) { dedisp_size scrunch_count = s+1 - scrunch_start; dedisp_size src_stride = out_stride_gulp_bytes; dedisp_byte* src = d_out + scrunch_start * src_stride; dedisp_byte* dst = (out + scrunch_start * out_stride + gulp_samp_byte_idx / cur_scrunch); dedisp_size width = nsamp_bytes_computed_gulp / cur_scrunch; dedisp_size height = scrunch_count; copy_device_to_host_2d(dst, // dst out_stride, // dst stride src, // src src_stride, // src stride width, // width bytes height); // height scrunch_start += scrunch_count; } } } else { copy_device_to_host_2d(out + gulp_samp_byte_idx, // dst out_stride, // dst stride d_out, // src out_stride_gulp_bytes, // src stride nsamp_bytes_computed_gulp, // width bytes dm_count); // height } #ifdef DEDISP_BENCHMARK hipDeviceSynchronize(); copy_from_timer.stop(); #endif } } // End of gulp loop #ifdef DEDISP_BENCHMARK cout << "Copy to time: " << copy_to_timer.getTime() << endl; cout << "Copy from time: " << copy_from_timer.getTime() << endl; cout << "Transpose time: " << transpose_timer.getTime() << endl; cout << "Kernel time: " << kernel_timer.getTime() << endl; float total_time = copy_to_timer.getTime() + copy_from_timer.getTime() + transpose_timer.getTime() + kernel_timer.getTime(); cout << "Total time: " << total_time << endl; // Append the timing results to a log file std::ofstream perf_file("perf.log", std::ios::app); perf_file << copy_to_timer.getTime() << "\t" << copy_from_timer.getTime() << "\t" << transpose_timer.getTime() << "\t" << kernel_timer.getTime() << "\t" << total_time << endl; perf_file.close(); #endif if( !(flags & DEDISP_ASYNC) ) { hipStreamSynchronize(stream); } // Phew! 
return DEDISP_NO_ERROR; } dedisp_error dedisp_execute_adv(const dedisp_plan plan, dedisp_size nsamps, const dedisp_byte* in, dedisp_size in_nbits, dedisp_size in_stride, dedisp_byte* out, dedisp_size out_nbits, dedisp_size out_stride, unsigned flags) { dedisp_size first_dm_idx = 0; dedisp_size dm_count = plan->dm_count; return dedisp_execute_guru(plan, nsamps, in, in_nbits, in_stride, out, out_nbits, out_stride, first_dm_idx, dm_count, flags); } // TODO: Consider having the user specify nsamps_computed instead of nsamps dedisp_error dedisp_execute(const dedisp_plan plan, dedisp_size nsamps, const dedisp_byte* in, dedisp_size in_nbits, dedisp_byte* out, dedisp_size out_nbits, unsigned flags) { enum { BITS_PER_BYTE = 8 }; // Note: The default out_stride is nsamps - plan->max_delay dedisp_size out_bytes_per_sample = out_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE); // Note: Must be careful with integer division dedisp_size in_stride = plan->nchans * in_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE); dedisp_size out_stride = (nsamps - plan->max_delay) * out_bytes_per_sample; return dedisp_execute_adv(plan, nsamps, in, in_nbits, in_stride, out, out_nbits, out_stride, flags); } dedisp_error dedisp_sync(void) { if( hipDeviceSynchronize() != hipSuccess ) throw_error(DEDISP_PRIOR_GPU_ERROR); else return DEDISP_NO_ERROR; } void dedisp_destroy_plan(dedisp_plan plan) { if( plan ) { delete plan; } } const char* dedisp_get_error_string(dedisp_error error) { switch( error ) { case DEDISP_NO_ERROR: return "No error"; case DEDISP_MEM_ALLOC_FAILED: return "Memory allocation failed"; case DEDISP_MEM_COPY_FAILED: return "Memory copy failed"; case DEDISP_INVALID_DEVICE_INDEX: return "Invalid device index"; case DEDISP_DEVICE_ALREADY_SET: return "Device is already set and cannot be changed"; case DEDISP_NCHANS_EXCEEDS_LIMIT: return "No. channels exceeds internal limit"; case DEDISP_INVALID_PLAN: return "Invalid plan"; case DEDISP_INVALID_POINTER: return "Invalid pointer"; case DEDISP_INVALID_STRIDE: return "Invalid stride"; case DEDISP_NO_DM_LIST_SET: return "No DM list has been set"; case DEDISP_TOO_FEW_NSAMPS: return "No. samples < maximum delay"; case DEDISP_INVALID_FLAG_COMBINATION: return "Invalid flag combination"; case DEDISP_UNSUPPORTED_IN_NBITS: return "Unsupported in_nbits value"; case DEDISP_UNSUPPORTED_OUT_NBITS: return "Unsupported out_nbits value"; case DEDISP_PRIOR_GPU_ERROR: return "Prior GPU error."; case DEDISP_INTERNAL_GPU_ERROR: return "Internal GPU error. Please contact the author(s)."; case DEDISP_UNKNOWN_ERROR: return "Unknown error. Please contact the author(s)."; default: return "Invalid error code"; } } dedisp_error dedisp_enable_adaptive_dt(dedisp_plan plan, dedisp_float pulse_width, dedisp_float tol) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } plan->scrunching_enabled = true; plan->pulse_width = pulse_width; plan->scrunch_tol = tol; return update_scrunch_list(plan); } dedisp_error dedisp_disable_adaptive_dt(dedisp_plan plan) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } plan->scrunching_enabled = false; return update_scrunch_list(plan); } dedisp_bool dedisp_using_adaptive_dt(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,false); } return plan->scrunching_enabled; } const dedisp_size* dedisp_get_dt_factors(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } return &plan->scrunch_list[0]; } // ----------------
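Editor's note: the file above implements the plan-management and execution entry points of the dedisp library. The host-side sketch below shows how those entry points fit together, using only signatures visible in the file; the channel count, time resolution, frequencies, DM range, and pulse-width/tolerance values are placeholders, and the output sizing follows the default stride convention in dedisp_execute (dm_count rows of nsamps - max_delay samples).

#include <dedisp.h>
#include <vector>
#include <cstdio>

// Illustrative driver (not part of the library): dedisperse 8-bit filterbank
// data held in host memory, producing 32-bit output, one row per DM trial.
int dedisperse_example(const dedisp_byte* h_input, dedisp_size nsamps)
{
  const dedisp_size  nchans = 1024;        // placeholder channel count
  const dedisp_float dt     = 64e-6f;      // placeholder sample interval
  const dedisp_float f0     = 1500.0f;     // placeholder highest-frequency channel
  const dedisp_float df     = -0.390625f;  // placeholder channel width (forced negative)

  dedisp_plan  plan = 0;
  dedisp_error err  = dedisp_create_plan(&plan, nchans, dt, f0, df);
  if (err != DEDISP_NO_ERROR)
  {
    std::printf("dedisp: %s\n", dedisp_get_error_string(err));
    return 1;
  }

  // Build a DM trial list; the range, pulse width, and tolerance are placeholders.
  err = dedisp_generate_dm_list(plan, 0.0f, 1000.0f, 40.0f, 1.25f);
  if (err != DEDISP_NO_ERROR) { dedisp_destroy_plan(plan); return 1; }

  const dedisp_size dm_count   = dedisp_get_dm_count(plan);
  const dedisp_size max_delay  = dedisp_get_max_delay(plan);
  const dedisp_size nsamps_out = nsamps - max_delay;

  // Default strides and host pointers (flags = 0); 32-bit floating-point output.
  std::vector<dedisp_float> h_output(dm_count * nsamps_out);
  err = dedisp_execute(plan, nsamps,
                       h_input, 8,
                       reinterpret_cast<dedisp_byte*>(h_output.data()), 32,
                       0);

  dedisp_destroy_plan(plan);
  return (err == DEDISP_NO_ERROR) ? 0 : 1;
}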
fcc7fef470a95938aff5c771e5ca732bf99e2049.cu
/* * Copyright 2012 Ben Barsdell * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* This file contains the boring boiler-plate code to manage the library. TODO: Test on 32-bit integer input Consider accepting 32-bit floats instead of 32-bit ints */ //#define DEDISP_DEBUG //#define DEDISP_BENCHMARK #include <dedisp.h> #include <vector> #include <algorithm> // For std::fill #include <thrust/host_vector.h> #include <thrust/device_vector.h> // For copying and scrunching the DM list #include <thrust/transform.h> #include <thrust/iterator/constant_iterator.h> #ifdef DEDISP_BENCHMARK #include <fstream> #endif #if defined(DEDISP_DEBUG) && DEDISP_DEBUG #include <stdio.h> // For printf #endif // TODO: Remove these when done benchmarking // ----------------------------------------- #if defined(DEDISP_BENCHMARK) #include <iostream> using std::cout; using std::endl; #include "stopwatch.hpp" #endif // ----------------------------------------- #include "gpu_memory.hpp" #include "transpose.hpp" #define DEDISP_DEFAULT_GULP_SIZE 65536 //131072 // Note: The implementation of the sub-band algorithm is a prototype only // Enable at your own risk! It may not be in a working state at all. //#define USE_SUBBAND_ALGORITHM #define DEDISP_DEFAULT_SUBBAND_SIZE 32 // TODO: Make sure this doesn't limit GPU constant memory // available to users. #define DEDISP_MAX_NCHANS 8192 // Internal word type used for transpose and dedispersion kernel typedef unsigned int dedisp_word; // Note: This must be included after the above #define and typedef #include "kernels.cuh" // Define plan structure struct dedisp_plan_struct { // Size parameters dedisp_size dm_count; dedisp_size nchans; dedisp_size max_delay; dedisp_size gulp_size; // Physical parameters dedisp_float dt; dedisp_float f0; dedisp_float df; // Host arrays std::vector<dedisp_float> dm_list; // size = dm_count std::vector<dedisp_float> delay_table; // size = nchans std::vector<dedisp_bool> killmask; // size = nchans std::vector<dedisp_size> scrunch_list; // size = dm_count // Device arrays thrust::device_vector<dedisp_float> d_dm_list; thrust::device_vector<dedisp_float> d_delay_table; thrust::device_vector<dedisp_bool> d_killmask; thrust::device_vector<dedisp_size> d_scrunch_list; //StreamType stream; // Scrunching parameters dedisp_bool scrunching_enabled; dedisp_float pulse_width; dedisp_float scrunch_tol; }; // Private helper functions // ------------------------ template<typename T> T min(T a, T b) { return a<b ? 
a : b; } unsigned long div_round_up(unsigned long a, unsigned long b) { return (a-1) / b + 1; } // Internal abstraction for errors #if defined(DEDISP_DEBUG) && DEDISP_DEBUG #define throw_error(error) do { \ printf("An error occurred within dedisp on line %d of %s: %s", \ __LINE__, __FILE__, dedisp_get_error_string(error)); \ return (error); } while(0) #define throw_getter_error(error, retval) do { \ printf("An error occurred within dedisp on line %d of %s: %s", \ __LINE__, __FILE__, dedisp_get_error_string(error)); \ return (retval); } while(0) #else #define throw_error(error) return error #define throw_getter_error(error, retval) return retval #endif // DEDISP_DEBUG /* dedisp_error throw_error(dedisp_error error) { // Note: Could, e.g., put an error callback in here return error; } */ dedisp_error update_scrunch_list(dedisp_plan plan) { if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } if( !plan->scrunching_enabled || 0 == plan->dm_count ) { plan->scrunch_list.resize(0); // Fill with 1's by default for safety plan->scrunch_list.resize(plan->dm_count, dedisp_size(1)); return DEDISP_NO_ERROR; } plan->scrunch_list.resize(plan->dm_count); dedisp_error error = generate_scrunch_list(&plan->scrunch_list[0], plan->dm_count, plan->dt, &plan->dm_list[0], plan->nchans, plan->f0, plan->df, plan->pulse_width, plan->scrunch_tol); if( error != DEDISP_NO_ERROR ) { return error; } // Allocate on and copy to the device try { plan->d_scrunch_list.resize(plan->dm_count); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_scrunch_list = plan->scrunch_list; } catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); } return DEDISP_NO_ERROR; } // ------------------------ // Public functions // ---------------- dedisp_error dedisp_create_plan(dedisp_plan* plan_, dedisp_size nchans, dedisp_float dt, dedisp_float f0, dedisp_float df) { // Initialise to NULL for safety *plan_ = 0; if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } int device_idx; cudaGetDevice(&device_idx); // Check for parameter errors if( nchans > DEDISP_MAX_NCHANS ) { throw_error(DEDISP_NCHANS_EXCEEDS_LIMIT); } // Force the df parameter to be negative such that // freq[chan] = f0 + chan * df. df = -abs(df); dedisp_plan plan = new dedisp_plan_struct(); if( !plan ) { throw_error(DEDISP_MEM_ALLOC_FAILED); } plan->dm_count = 0; plan->nchans = nchans; plan->gulp_size = DEDISP_DEFAULT_GULP_SIZE; plan->max_delay = 0; plan->dt = dt; plan->f0 = f0; plan->df = df; //plan->stream = 0; // Generate delay table and copy to device memory // Note: The DM factor is left out and applied during dedispersion plan->delay_table.resize(plan->nchans); generate_delay_table(&plan->delay_table[0], plan->nchans, dt, f0, df); try { plan->d_delay_table.resize(plan->nchans); } catch(...) { dedisp_destroy_plan(plan); throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_delay_table = plan->delay_table; } catch(...) { dedisp_destroy_plan(plan); throw_error(DEDISP_MEM_COPY_FAILED); } // Initialise the killmask plan->killmask.resize(plan->nchans, (dedisp_bool)true); try { plan->d_killmask.resize(plan->nchans); } catch(...) 
{ dedisp_destroy_plan(plan); throw_error(DEDISP_MEM_ALLOC_FAILED); } dedisp_error err = dedisp_set_killmask(plan, (dedisp_bool*)0); if( err != DEDISP_NO_ERROR ) { dedisp_destroy_plan(plan); throw_error(err); } *plan_ = plan; return DEDISP_NO_ERROR; } dedisp_error dedisp_set_gulp_size(dedisp_plan plan, dedisp_size gulp_size) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } plan->gulp_size = gulp_size; return DEDISP_NO_ERROR; } dedisp_size dedisp_get_gulp_size(dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->gulp_size; } dedisp_error dedisp_set_dm_list(dedisp_plan plan, const dedisp_float* dm_list, dedisp_size count) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( !dm_list ) { throw_error(DEDISP_INVALID_POINTER); } if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } plan->dm_count = count; plan->dm_list.assign(dm_list, dm_list+count); // Copy to the device try { plan->d_dm_list.resize(plan->dm_count); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_dm_list = plan->dm_list; } catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); } // Calculate the maximum delay and store it in the plan plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] * plan->delay_table[plan->nchans-1] + 0.5); dedisp_error error = update_scrunch_list(plan); if( error != DEDISP_NO_ERROR ) { throw_error(error); } return DEDISP_NO_ERROR; } dedisp_error dedisp_generate_dm_list(dedisp_plan plan, dedisp_float dm_start, dedisp_float dm_end, dedisp_float ti, dedisp_float tol) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } // Generate the DM list (on the host) plan->dm_list.clear(); generate_dm_list(plan->dm_list, dm_start, dm_end, plan->dt, ti, plan->f0, plan->df, plan->nchans, tol); plan->dm_count = plan->dm_list.size(); // Allocate device memory for the DM list try { plan->d_dm_list.resize(plan->dm_count); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } try { plan->d_dm_list = plan->dm_list; } catch(...) 
{ throw_error(DEDISP_MEM_COPY_FAILED); } // Calculate the maximum delay and store it in the plan plan->max_delay = dedisp_size(plan->dm_list[plan->dm_count-1] * plan->delay_table[plan->nchans-1] + 0.5); dedisp_error error = update_scrunch_list(plan); if( error != DEDISP_NO_ERROR ) { throw_error(error); } return DEDISP_NO_ERROR; } dedisp_float * dedisp_generate_dm_list_guru (dedisp_float dm_start, dedisp_float dm_end, double dt, double ti, double f0, double df, dedisp_size nchans, double tol, dedisp_size * dm_count) { std::vector<dedisp_float> dm_table; generate_dm_list(dm_table, dm_start, dm_end, dt, ti, f0, df, nchans, tol); *dm_count = dm_table.size(); return &dm_table[0]; } dedisp_error dedisp_set_device(int device_idx) { if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } cudaError_t error = cudaSetDevice(device_idx); // Note: cudaErrorInvalidValue isn't a documented return value, but // it still gets returned :/ if( cudaErrorInvalidDevice == error || cudaErrorInvalidValue == error ) throw_error(DEDISP_INVALID_DEVICE_INDEX); else if( cudaErrorSetOnActiveProcess == error ) throw_error(DEDISP_DEVICE_ALREADY_SET); else if( cudaSuccess != error ) throw_error(DEDISP_UNKNOWN_ERROR); else return DEDISP_NO_ERROR; } dedisp_error dedisp_set_killmask(dedisp_plan plan, const dedisp_bool* killmask) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } if( 0 != killmask ) { // Copy killmask to plan (both host and device) plan->killmask.assign(killmask, killmask + plan->nchans); try { plan->d_killmask = plan->killmask; } catch(...) { throw_error(DEDISP_MEM_COPY_FAILED); } } else { // Set the killmask to all true std::fill(plan->killmask.begin(), plan->killmask.end(), (dedisp_bool)true); thrust::fill(plan->d_killmask.begin(), plan->d_killmask.end(), (dedisp_bool)true); } return DEDISP_NO_ERROR; } /* dedisp_plan dedisp_set_stream(dedisp_plan plan, StreamType stream) { plan->stream = stream; return plan; } */ // Getters // ------- dedisp_size dedisp_get_max_delay(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } return plan->max_delay; } dedisp_size dedisp_get_dm_delay(const dedisp_plan plan, int dm_trial) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } if (dm_trial < 0 || dm_trial >= plan->dm_count ) { throw_getter_error(DEDISP_UNKNOWN_ERROR,0); } return (plan->dm_list[dm_trial] * plan->delay_table[plan->nchans-1] + 0.5); } dedisp_size dedisp_get_channel_count(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->nchans; } dedisp_size dedisp_get_dm_count(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->dm_count; } const dedisp_float* dedisp_get_dm_list(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } return &plan->dm_list[0]; } const dedisp_bool* dedisp_get_killmask(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return &plan->killmask[0]; } dedisp_float dedisp_get_dt(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->dt; } dedisp_float dedisp_get_f0(const dedisp_plan plan) { if( !plan ) { 
throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->f0; } dedisp_float dedisp_get_df(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } return plan->df; } // Warning: Big mother function dedisp_error dedisp_execute_guru(const dedisp_plan plan, dedisp_size nsamps, const dedisp_byte* in, dedisp_size in_nbits, dedisp_size in_stride, dedisp_byte* out, dedisp_size out_nbits, dedisp_size out_stride, dedisp_size first_dm_idx, dedisp_size dm_count, unsigned flags) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } if( cudaGetLastError() != cudaSuccess ) { throw_error(DEDISP_PRIOR_GPU_ERROR); } enum { BITS_PER_BYTE = 8, BYTES_PER_WORD = sizeof(dedisp_word) / sizeof(dedisp_byte) }; dedisp_size out_bytes_per_sample = out_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE); if( 0 == in || 0 == out ) { throw_error(DEDISP_INVALID_POINTER); } // Note: Must be careful with integer division if( in_stride < plan->nchans*in_nbits/(sizeof(dedisp_byte)*BITS_PER_BYTE) || out_stride < (nsamps - plan->max_delay)*out_bytes_per_sample ) { throw_error(DEDISP_INVALID_STRIDE); } if( 0 == plan->dm_count ) { throw_error(DEDISP_NO_DM_LIST_SET); } if( nsamps < plan->max_delay ) { throw_error(DEDISP_TOO_FEW_NSAMPS); } // Check for valid synchronisation flags if( flags & DEDISP_ASYNC && flags & DEDISP_WAIT ) { throw_error(DEDISP_INVALID_FLAG_COMBINATION); } // Check for valid nbits values if( in_nbits != 1 && in_nbits != 2 && in_nbits != 4 && in_nbits != 8 && in_nbits != 16 && in_nbits != 32 ) { throw_error(DEDISP_UNSUPPORTED_IN_NBITS); } if( out_nbits != 8 && out_nbits != 16 && out_nbits != 32 ) { throw_error(DEDISP_UNSUPPORTED_OUT_NBITS); } bool using_host_memory; if( flags & DEDISP_HOST_POINTERS && flags & DEDISP_DEVICE_POINTERS ) { throw_error(DEDISP_INVALID_FLAG_COMBINATION); } else { using_host_memory = !(flags & DEDISP_DEVICE_POINTERS); } // Copy the lookup tables to constant memory on the device // TODO: This was much tidier, but thanks to CUDA's insistence on // breaking its API in v5.0 I had to mess it up like this. cudaMemcpyToSymbolAsync(c_delay_table, thrust::raw_pointer_cast(&plan->d_delay_table[0]), plan->nchans * sizeof(dedisp_float), 0, cudaMemcpyDeviceToDevice, 0); cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } cudaMemcpyToSymbolAsync(c_killmask, thrust::raw_pointer_cast(&plan->d_killmask[0]), plan->nchans * sizeof(dedisp_bool), 0, cudaMemcpyDeviceToDevice, 0); cudaThreadSynchronize(); error = cudaGetLastError(); if( error != cudaSuccess ) { throw_error(DEDISP_MEM_COPY_FAILED); } // Compute the problem decomposition dedisp_size nsamps_computed = nsamps - plan->max_delay; // Specify the maximum gulp size dedisp_size nsamps_computed_gulp_max; if( using_host_memory ) { nsamps_computed_gulp_max = min(plan->gulp_size, nsamps_computed); } else { // Just do it in one gulp if given device pointers nsamps_computed_gulp_max = nsamps_computed; } // Just to be sure // TODO: This seems quite wrong. Why was it here? /* if( nsamps_computed_gulp_max < plan->max_delay ) { throw_error(DEDISP_TOO_FEW_NSAMPS); } */ // Compute derived counts for maximum gulp size [dedisp_word == 4 bytes] dedisp_size nsamps_gulp_max = nsamps_computed_gulp_max + plan->max_delay; dedisp_size chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / in_nbits; dedisp_size nchan_words = plan->nchans / chans_per_word; // We use words for processing but allow arbitrary byte strides, which are // not necessarily friendly. 
bool friendly_in_stride = (0 == in_stride % BYTES_PER_WORD); // Note: If desired, this could be rounded up, e.g., to a power of 2 dedisp_size in_buf_stride_words = nchan_words; dedisp_size in_count_gulp_max = nsamps_gulp_max * in_buf_stride_words; dedisp_size nsamps_padded_gulp_max = div_round_up(nsamps_computed_gulp_max, DEDISP_SAMPS_PER_THREAD) * DEDISP_SAMPS_PER_THREAD + plan->max_delay; dedisp_size in_count_padded_gulp_max = nsamps_padded_gulp_max * in_buf_stride_words; // TODO: Make this a parameter? dedisp_size min_in_nbits = 0; if( plan->scrunching_enabled ) { // TODO: This produces corrupt output when equal to 32 ! // Also check whether the unpacker is broken when in_nbits=32 ! min_in_nbits = 16; //32; } dedisp_size unpacked_in_nbits = max((int)in_nbits, (int)min_in_nbits); dedisp_size unpacked_chans_per_word = sizeof(dedisp_word)*BITS_PER_BYTE / unpacked_in_nbits; dedisp_size unpacked_nchan_words = plan->nchans / unpacked_chans_per_word; dedisp_size unpacked_buf_stride_words = unpacked_nchan_words; dedisp_size unpacked_count_padded_gulp_max = nsamps_padded_gulp_max * unpacked_buf_stride_words; dedisp_size out_stride_gulp_samples = nsamps_computed_gulp_max; dedisp_size out_stride_gulp_bytes = out_stride_gulp_samples * out_bytes_per_sample; dedisp_size out_count_gulp_max = out_stride_gulp_bytes * dm_count; // Organise device memory pointers // ------------------------------- const dedisp_word* d_in = 0; dedisp_word* d_transposed = 0; dedisp_word* d_unpacked = 0; dedisp_byte* d_out = 0; thrust::device_vector<dedisp_word> d_in_buf; thrust::device_vector<dedisp_word> d_transposed_buf; thrust::device_vector<dedisp_word> d_unpacked_buf; thrust::device_vector<dedisp_byte> d_out_buf; // Allocate temporary buffers on the device where necessary if( using_host_memory || !friendly_in_stride ) { try { d_in_buf.resize(in_count_gulp_max); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_in = thrust::raw_pointer_cast(&d_in_buf[0]); } else { d_in = (dedisp_word*)in; } if( using_host_memory ) { try { d_out_buf.resize(out_count_gulp_max); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_out = thrust::raw_pointer_cast(&d_out_buf[0]); } else { d_out = out; } //// Note: * 2 here is for the time-scrunched copies of the data try { d_transposed_buf.resize(in_count_padded_gulp_max/* * 2 */); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_transposed = thrust::raw_pointer_cast(&d_transposed_buf[0]); // Note: * 2 here is for the time-scrunched copies of the data try { d_unpacked_buf.resize(unpacked_count_padded_gulp_max * 2); } catch(...) { throw_error(DEDISP_MEM_ALLOC_FAILED); } d_unpacked = thrust::raw_pointer_cast(&d_unpacked_buf[0]); // ------------------------------- // The stride (in words) between differently-scrunched copies of the // unpacked data. dedisp_size scrunch_stride = unpacked_count_padded_gulp_max; #ifdef USE_SUBBAND_ALGORITHM dedisp_size sb_size = DEDISP_DEFAULT_SUBBAND_SIZE; // Note: Setting these two parameters equal should balance the two steps of // the sub-band algorithm. dedisp_size dm_size = sb_size; // Ndm' dedisp_size sb_count = plan->nchans / sb_size; dedisp_size nom_dm_count = dm_count / dm_size; thrust::device_vector<dedisp_word> d_intermediate_buf; try { d_intermediate_buf.resize(nsamps_padded_gulp_max * sb_count * nom_dm_count); } catch(...) 
{ throw_error(DEDISP_MEM_ALLOC_FAILED); } dedisp_word* d_intermediate = thrust::raw_pointer_cast(&d_intermediate_buf[0]); #endif // USE_SUBBAND_ALGORITHM // TODO: Eventually re-implement streams cudaStream_t stream = 0;//(cudaStream_t)plan->stream; #ifdef DEDISP_BENCHMARK Stopwatch copy_to_timer; Stopwatch copy_from_timer; Stopwatch transpose_timer; Stopwatch kernel_timer; #endif // Gulp loop for( dedisp_size gulp_samp_idx=0; gulp_samp_idx<nsamps_computed; gulp_samp_idx+=nsamps_computed_gulp_max ) { dedisp_size nsamps_computed_gulp = min(nsamps_computed_gulp_max, nsamps_computed-gulp_samp_idx); dedisp_size nsamps_gulp = nsamps_computed_gulp + plan->max_delay; dedisp_size nsamps_padded_gulp = div_round_up(nsamps_computed_gulp, DEDISP_SAMPS_PER_THREAD) * DEDISP_SAMPS_PER_THREAD + plan->max_delay; #ifdef DEDISP_BENCHMARK copy_to_timer.start(); #endif // Copy the input data from host to device if necessary if( using_host_memory ) { // Allowing arbitrary byte strides means we must do a strided copy if( !copy_host_to_device_2d((dedisp_byte*)d_in, in_buf_stride_words * BYTES_PER_WORD, in + gulp_samp_idx*in_stride, in_stride, nchan_words * BYTES_PER_WORD, nsamps_gulp) ) { throw_error(DEDISP_MEM_COPY_FAILED); } } else if( !friendly_in_stride ) { // Device pointers with unfriendly stride if( !copy_device_to_device_2d((dedisp_byte*)d_in, in_buf_stride_words * BYTES_PER_WORD, in + gulp_samp_idx*in_stride, in_stride, nchan_words * BYTES_PER_WORD, nsamps_gulp) ) { throw_error(DEDISP_MEM_COPY_FAILED); } } #ifdef DEDISP_BENCHMARK cudaThreadSynchronize(); copy_to_timer.stop(); transpose_timer.start(); #endif // Transpose the words in the input Transpose<dedisp_word> transpose; transpose.transpose(d_in, nchan_words, nsamps_gulp, in_buf_stride_words, nsamps_padded_gulp, d_transposed); #ifdef DEDISP_BENCHMARK cudaThreadSynchronize(); transpose_timer.stop(); kernel_timer.start(); #endif // Unpack the transposed data unpack(d_transposed, nsamps_padded_gulp, nchan_words, d_unpacked, in_nbits, unpacked_in_nbits); // Compute time-scrunched copies of the data if( plan->scrunching_enabled ) { dedisp_size max_scrunch = plan->scrunch_list[plan->dm_count-1]; dedisp_size scrunch_in_offset = 0; dedisp_size scrunch_out_offset = scrunch_stride; for( dedisp_size s=2; s<=max_scrunch; s*=2 ) { // TODO: Need to pass in stride and count? I.e., nsamps_padded/computed_gulp //scrunch_x2(&d_transposed[scrunch_in_offset], // nsamps_padded_gulp/(s/2), nchan_words, in_nbits, // &d_transposed[scrunch_out_offset]); scrunch_x2(&d_unpacked[scrunch_in_offset], nsamps_padded_gulp/(s/2), unpacked_nchan_words, unpacked_in_nbits, &d_unpacked[scrunch_out_offset]); scrunch_in_offset = scrunch_out_offset; scrunch_out_offset += scrunch_stride / s; } } #ifdef USE_SUBBAND_ALGORITHM // TODO: This has not been updated to use d_unpacked! 
dedisp_size chan_stride = 1; dedisp_size dm_stride = dm_size; dedisp_size ostride = nsamps_padded_gulp * sb_count; dedisp_size batch_size = sb_count; dedisp_size batch_in_stride = nsamps_padded_gulp * sb_size / chans_per_word; dedisp_size batch_dm_stride = 0; dedisp_size batch_chan_stride = sb_size; dedisp_size batch_out_stride = nsamps_padded_gulp; /* // Consistency checks if( (nom_dm_count-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) { throw std::runtime_error("DM STRIDES ARE INCONSISTENT"); } if( (sb_size-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) { throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT"); } */ // Both steps if( !dedisperse(d_transposed, nsamps_padded_gulp, nsamps_computed_gulp, in_nbits, sb_size, chan_stride, thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]), nom_dm_count, dm_stride, (dedisp_byte*)d_intermediate, ostride, 32,//out_nbits, batch_size, batch_in_stride, batch_dm_stride, batch_chan_stride, batch_out_stride) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } batch_size = nom_dm_count; chan_stride = sb_size; dm_stride = 1; ostride = out_stride_gulp_samples; batch_in_stride = nsamps_padded_gulp * sb_count; batch_dm_stride = 0; batch_chan_stride = 0; batch_out_stride = out_stride_gulp_samples * dm_size; /* // Consistency checks if( (dm_size-1)*dm_stride + (batch_size-1)*batch_dm_stride >= dm_count ) { throw std::runtime_error("DM STRIDES ARE INCONSISTENT"); } if( (sb_count-1)*chan_stride + (batch_size-1)*batch_chan_stride >= plan->nchans ) { throw std::runtime_error("CHAN STRIDES ARE INCONSISTENT"); } */ if( !dedisperse(d_intermediate, nsamps_padded_gulp, nsamps_computed_gulp, 32,//in_nbits, sb_count, chan_stride, thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]), dm_size, dm_stride, d_out, ostride, out_nbits, batch_size, batch_in_stride, batch_dm_stride, batch_chan_stride, batch_out_stride) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } #else // Use direct algorithm if( plan->scrunching_enabled ) { // TODO: THIS WILL NOT WORK IF dm_count < plan->dm_count ! // Need to avoid assumption that scrunch starts at 1 // Must start the scrunch at the first *requested* DM thrust::device_vector<dedisp_float> d_scrunched_dm_list(dm_count); dedisp_size scrunch_start = 0; dedisp_size scrunch_offset = 0; for( dedisp_size s=0; s<dm_count; ++s ) { dedisp_size cur_scrunch = plan->scrunch_list[s]; // Look for segment boundaries if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) { //dedisp_size next_scrunch = plan->scrunch_list[s]; //if( next_scrunch != cur_scrunch ) { dedisp_size scrunch_count = s+1 - scrunch_start; // Make a copy of the dm list divided by the scrunch factor // Note: This has the effect of increasing dt in the delay eqn dedisp_size dm_offset = first_dm_idx + scrunch_start; thrust::transform(plan->d_dm_list.begin() + dm_offset, plan->d_dm_list.begin() + dm_offset + scrunch_count, thrust::make_constant_iterator(cur_scrunch), d_scrunched_dm_list.begin(), thrust::divides<dedisp_float>()); dedisp_float* d_scrunched_dm_list_ptr = thrust::raw_pointer_cast(&d_scrunched_dm_list[0]); // TODO: Is this how the nsamps vars need to change? 
if( !dedisperse(//&d_transposed[scrunch_offset], &d_unpacked[scrunch_offset], nsamps_padded_gulp / cur_scrunch, nsamps_computed_gulp / cur_scrunch, unpacked_in_nbits, //in_nbits, plan->nchans, 1, d_scrunched_dm_list_ptr, scrunch_count, // dm_count 1, d_out + scrunch_start*out_stride_gulp_bytes, out_stride_gulp_samples, out_nbits, 1, 0, 0, 0, 0) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } scrunch_offset += scrunch_stride / cur_scrunch; scrunch_start += scrunch_count; } } } else { // Perform direct dedispersion without scrunching if( !dedisperse(//d_transposed, d_unpacked, nsamps_padded_gulp, nsamps_computed_gulp, unpacked_in_nbits, //in_nbits, plan->nchans, 1, thrust::raw_pointer_cast(&plan->d_dm_list[first_dm_idx]), dm_count, 1, d_out, out_stride_gulp_samples, out_nbits, 1, 0, 0, 0, 0) ) { throw_error(DEDISP_INTERNAL_GPU_ERROR); } } #endif // SB/direct algorithm #ifdef DEDISP_BENCHMARK cudaThreadSynchronize(); kernel_timer.stop(); #endif // Copy output back to host memory if necessary if( using_host_memory ) { dedisp_size gulp_samp_byte_idx = gulp_samp_idx * out_bytes_per_sample; dedisp_size nsamp_bytes_computed_gulp = nsamps_computed_gulp * out_bytes_per_sample; #ifdef DEDISP_BENCHMARK copy_from_timer.start(); #endif if( plan->scrunching_enabled ) { // TODO: This for-loop isn't a very elegant solution dedisp_size scrunch_start = 0; for( dedisp_size s=0; s<dm_count; ++s ) { dedisp_size cur_scrunch = plan->scrunch_list[s]; // Look for segment boundaries if( s+1 == dm_count || plan->scrunch_list[s+1] != cur_scrunch ) { dedisp_size scrunch_count = s+1 - scrunch_start; dedisp_size src_stride = out_stride_gulp_bytes; dedisp_byte* src = d_out + scrunch_start * src_stride; dedisp_byte* dst = (out + scrunch_start * out_stride + gulp_samp_byte_idx / cur_scrunch); dedisp_size width = nsamp_bytes_computed_gulp / cur_scrunch; dedisp_size height = scrunch_count; copy_device_to_host_2d(dst, // dst out_stride, // dst stride src, // src src_stride, // src stride width, // width bytes height); // height scrunch_start += scrunch_count; } } } else { copy_device_to_host_2d(out + gulp_samp_byte_idx, // dst out_stride, // dst stride d_out, // src out_stride_gulp_bytes, // src stride nsamp_bytes_computed_gulp, // width bytes dm_count); // height } #ifdef DEDISP_BENCHMARK cudaThreadSynchronize(); copy_from_timer.stop(); #endif } } // End of gulp loop #ifdef DEDISP_BENCHMARK cout << "Copy to time: " << copy_to_timer.getTime() << endl; cout << "Copy from time: " << copy_from_timer.getTime() << endl; cout << "Transpose time: " << transpose_timer.getTime() << endl; cout << "Kernel time: " << kernel_timer.getTime() << endl; float total_time = copy_to_timer.getTime() + copy_from_timer.getTime() + transpose_timer.getTime() + kernel_timer.getTime(); cout << "Total time: " << total_time << endl; // Append the timing results to a log file std::ofstream perf_file("perf.log", std::ios::app); perf_file << copy_to_timer.getTime() << "\t" << copy_from_timer.getTime() << "\t" << transpose_timer.getTime() << "\t" << kernel_timer.getTime() << "\t" << total_time << endl; perf_file.close(); #endif if( !(flags & DEDISP_ASYNC) ) { cudaStreamSynchronize(stream); } // Phew! 
return DEDISP_NO_ERROR; } dedisp_error dedisp_execute_adv(const dedisp_plan plan, dedisp_size nsamps, const dedisp_byte* in, dedisp_size in_nbits, dedisp_size in_stride, dedisp_byte* out, dedisp_size out_nbits, dedisp_size out_stride, unsigned flags) { dedisp_size first_dm_idx = 0; dedisp_size dm_count = plan->dm_count; return dedisp_execute_guru(plan, nsamps, in, in_nbits, in_stride, out, out_nbits, out_stride, first_dm_idx, dm_count, flags); } // TODO: Consider having the user specify nsamps_computed instead of nsamps dedisp_error dedisp_execute(const dedisp_plan plan, dedisp_size nsamps, const dedisp_byte* in, dedisp_size in_nbits, dedisp_byte* out, dedisp_size out_nbits, unsigned flags) { enum { BITS_PER_BYTE = 8 }; // Note: The default out_stride is nsamps - plan->max_delay dedisp_size out_bytes_per_sample = out_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE); // Note: Must be careful with integer division dedisp_size in_stride = plan->nchans * in_nbits / (sizeof(dedisp_byte) * BITS_PER_BYTE); dedisp_size out_stride = (nsamps - plan->max_delay) * out_bytes_per_sample; return dedisp_execute_adv(plan, nsamps, in, in_nbits, in_stride, out, out_nbits, out_stride, flags); } dedisp_error dedisp_sync(void) { if( cudaThreadSynchronize() != cudaSuccess ) throw_error(DEDISP_PRIOR_GPU_ERROR); else return DEDISP_NO_ERROR; } void dedisp_destroy_plan(dedisp_plan plan) { if( plan ) { delete plan; } } const char* dedisp_get_error_string(dedisp_error error) { switch( error ) { case DEDISP_NO_ERROR: return "No error"; case DEDISP_MEM_ALLOC_FAILED: return "Memory allocation failed"; case DEDISP_MEM_COPY_FAILED: return "Memory copy failed"; case DEDISP_INVALID_DEVICE_INDEX: return "Invalid device index"; case DEDISP_DEVICE_ALREADY_SET: return "Device is already set and cannot be changed"; case DEDISP_NCHANS_EXCEEDS_LIMIT: return "No. channels exceeds internal limit"; case DEDISP_INVALID_PLAN: return "Invalid plan"; case DEDISP_INVALID_POINTER: return "Invalid pointer"; case DEDISP_INVALID_STRIDE: return "Invalid stride"; case DEDISP_NO_DM_LIST_SET: return "No DM list has been set"; case DEDISP_TOO_FEW_NSAMPS: return "No. samples < maximum delay"; case DEDISP_INVALID_FLAG_COMBINATION: return "Invalid flag combination"; case DEDISP_UNSUPPORTED_IN_NBITS: return "Unsupported in_nbits value"; case DEDISP_UNSUPPORTED_OUT_NBITS: return "Unsupported out_nbits value"; case DEDISP_PRIOR_GPU_ERROR: return "Prior GPU error."; case DEDISP_INTERNAL_GPU_ERROR: return "Internal GPU error. Please contact the author(s)."; case DEDISP_UNKNOWN_ERROR: return "Unknown error. Please contact the author(s)."; default: return "Invalid error code"; } } dedisp_error dedisp_enable_adaptive_dt(dedisp_plan plan, dedisp_float pulse_width, dedisp_float tol) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } plan->scrunching_enabled = true; plan->pulse_width = pulse_width; plan->scrunch_tol = tol; return update_scrunch_list(plan); } dedisp_error dedisp_disable_adaptive_dt(dedisp_plan plan) { if( !plan ) { throw_error(DEDISP_INVALID_PLAN); } plan->scrunching_enabled = false; return update_scrunch_list(plan); } dedisp_bool dedisp_using_adaptive_dt(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,false); } return plan->scrunching_enabled; } const dedisp_size* dedisp_get_dt_factors(const dedisp_plan plan) { if( !plan ) { throw_getter_error(DEDISP_INVALID_PLAN,0); } if( 0 == plan->dm_count ) { throw_getter_error(DEDISP_NO_DM_LIST_SET,0); } return &plan->scrunch_list[0]; } // ----------------
bbd5396cce9090e302609e020064a512e3b3c389.hip
// !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=32 --no-inline
//This kernel is not-racy: memset is called with variable value.
//#define memset(dst,val,len) __builtin_memset(dst,val,len)
#define N 2//32
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>

__device__ int bar(void){
    int value;
    return value;
}

__global__ void kernel(uint4 *out) {
    uint4 vector;
    int val = bar();
    memset(&vector, val, 16);
    out[threadIdx.x] = vector; /**/
}

int main(){
    uint4 *a;
    uint4 *dev_a;
    int size = N*sizeof(uint4);

    a = (uint4*)malloc(size);

    /* initialization of a */
    for (int i = 0; i < N; i++) {
        a[i].x = i;
        a[i].y = i;
        a[i].z = i,
        a[i].w = i;
    }

    hipMalloc((void**)&dev_a, size);
    hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);

    /* printf("a:\n");
    for (int i = 0; i < N; i++)
        printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w);
    */

    //kernel<<<1,N>>>(dev_a);
    ESBMC_verify_kernel_u(kernel,1,N,dev_a);

    hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);

    printf("new a:\n");
    for (int i = 0; i < N; i++) {
        /* printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w); */
        assert(a[i].x == 0);
        assert(a[i].y == 0);
        assert(a[i].z == 0);
        assert(a[i].w == 0);
    }

    hipFree(dev_a);
    free(a);

    return 0;
}
bbd5396cce9090e302609e020064a512e3b3c389.cu
#include <call_kernel.h>
//xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=32 --no-inline
//This kernel is not-racy: memset is called with variable value.
//#define memset(dst,val,len) __builtin_memset(dst,val,len)
#define N 2//32
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

__device__ int bar(void){
    int value;
    return value;
}

__global__ void kernel(uint4 *out) {
    uint4 vector;
    int val = bar();
    memset(&vector, val, 16);
    out[threadIdx.x] = vector; /**/
}

int main(){
    uint4 *a;
    uint4 *dev_a;
    int size = N*sizeof(uint4);

    a = (uint4*)malloc(size);

    /* initialization of a */
    for (int i = 0; i < N; i++) {
        a[i].x = i;
        a[i].y = i;
        a[i].z = i,
        a[i].w = i;
    }

    cudaMalloc((void**)&dev_a, size);
    cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);

    /* printf("a:\n");
    for (int i = 0; i < N; i++)
        printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w);
    */

    //kernel<<<1,N>>>(dev_a);
    ESBMC_verify_kernel_u(kernel,1,N,dev_a);

    cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);

    printf("new a:\n");
    for (int i = 0; i < N; i++) {
        /* printf("a[%d].x : %d \ta[%d].y : %d\ta[%d].z : %d\ta[%d].w : %d\n", i, a[i].x, i, a[i].y, i, a[i].z, i, a[i].w); */
        assert(a[i].x == 0);
        assert(a[i].y == 0);
        assert(a[i].z == 0);
        assert(a[i].w == 0);
    }

    cudaFree(dev_a);
    free(a);

    return 0;
}
11e1b2dc22c1b5da9582d2c34f76aae22306ec03.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpuerrchk.cuh"
#include "real.h"

#define SECTION_SIZE 1024
//input size = SECTION_SIZE is twice the block size, so that each thread handles two of the elements to be summed
__global__ void ch8_in_scan_kernel(real* X, real* Y, int inputsize){
    __shared__ real XY[SECTION_SIZE];
    int i=2*blockIdx.x*blockDim.x+threadIdx.x;
    if (i < inputsize) XY[threadIdx.x]=X[i];
    if (i + blockDim.x <inputsize) XY[threadIdx.x+blockDim.x]=X[i+blockDim.x];

    //up-sweep
    for (int stride=1; stride <= blockDim.x; stride*=2){
        __syncthreads();
        int index= 2*(threadIdx.x+1)*stride -1;
        if (index< SECTION_SIZE) XY[index]+=XY[index-stride];
    }

    XY[SECTION_SIZE-1]=0;

    //down-sweep
    for (int stride=blockDim.x; stride>=1; stride/=2){
        __syncthreads();
        int index = 2*(threadIdx.x+1)*stride-1;
        if (index < SECTION_SIZE){
            real temp=XY[index-stride];
            XY[index-stride]=XY[index];
            XY[index]+=temp;
        }
    }

    __syncthreads();
    if (i< inputsize) Y[i]= XY[threadIdx.x];
    if (i+blockDim.x < inputsize) Y[i+blockDim.x]=XY[threadIdx.x+blockDim.x];
}

void ch8_in_scan(real* d_X, real* d_Y,int inputsize){
    hipLaunchKernelGGL(( ch8_in_scan_kernel), dim3(1),dim3(SECTION_SIZE/2), 0, 0, d_X,d_Y,inputsize);
    gpuErrchk(hipPeekAtLastError());
}
11e1b2dc22c1b5da9582d2c34f76aae22306ec03.cu
#include "gpuerrchk.cuh" #include "real.h" #define SECTION_SIZE 1024 //input size=SECTION_SIZE is twice block size, so that we can have 2 threads per element to be summed __global__ void ch8_in_scan_kernel(real* X, real* Y, int inputsize){ __shared__ real XY[SECTION_SIZE]; int i=2*blockIdx.x*blockDim.x+threadIdx.x; if (i < inputsize) XY[threadIdx.x]=X[i]; if (i + blockDim.x <inputsize) XY[threadIdx.x+blockDim.x]=X[i+blockDim.x]; //up-sweep for (int stride=1; stride <= blockDim.x; stride*=2){ __syncthreads(); int index= 2*(threadIdx.x+1)*stride -1; if (index< SECTION_SIZE) XY[index]+=XY[index-stride]; } XY[SECTION_SIZE-1]=0; //down-sweep for (int stride=blockDim.x; stride>=1; stride/=2){ __syncthreads(); int index = 2*(threadIdx.x+1)*stride-1; if (index < SECTION_SIZE){ real temp=XY[index-stride]; XY[index-stride]=XY[index]; XY[index]+=temp; } } __syncthreads(); if (i< inputsize) Y[i]= XY[threadIdx.x]; if (i+blockDim.x < inputsize) Y[i+blockDim.x]=XY[threadIdx.x+blockDim.x]; } void ch8_in_scan(real* d_X, real* d_Y,int inputsize){ ch8_in_scan_kernel<<<1,SECTION_SIZE/2>>>(d_X,d_Y,inputsize); gpuErrchk(cudaPeekAtLastError()); }
a3a2e0922ee41bbf110ab5e5cad673374d3b4545.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } template <typename Dtype> void ReidPrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { ReidBatch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty"); // CHECK CHECK_EQ(top[0]->count(), batch->data_.count()*2); // Reshape to loaded data. top[0]->Reshape(batch->data_.num()*2, batch->data_.channels(), batch->data_.height(), batch->data_.width()); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); caffe_copy(batch->datap_.count(), batch->datap_.gpu_data(), top[0]->mutable_gpu_data()+batch->data_.count()); if (this->output_labels_) { // Reshape to loaded labels. vector<int> shape = batch->label_.shape(); CHECK_LT(shape.size(), 2); CHECK_EQ(top[1]->count(), batch->label_.count()*2); shape[0] *= 2; top[1]->Reshape(shape); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); caffe_copy(batch->labelp_.count(), batch->labelp_.gpu_data(), top[1]->mutable_gpu_data()+batch->label_.count()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } template <typename Dtype> void MsPrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { MsBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { for (int nn = 0; nn < batch->labels_.size(); nn++) { // Reshape to loaded labels. top[nn+1]->ReshapeLike(*batch->labels_[nn]); // Copy the labels. caffe_copy(batch->labels_[nn]->count(), batch->labels_[nn]->gpu_data(), top[nn+1]->mutable_gpu_data()); } } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(hipStreamSynchronize(hipStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD(ReidPrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD(MsPrefetchingDataLayer); } // namespace caffe
a3a2e0922ee41bbf110ab5e5cad673374d3b4545.cu
#include <vector> #include "caffe/layers/base_data_layer.hpp" namespace caffe { template <typename Dtype> void BasePrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. top[1]->ReshapeLike(batch->label_); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } template <typename Dtype> void ReidPrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { ReidBatch<Dtype>* batch = this->prefetch_full_.pop("Data layer prefetch queue empty"); // CHECK CHECK_EQ(top[0]->count(), batch->data_.count()*2); // Reshape to loaded data. top[0]->Reshape(batch->data_.num()*2, batch->data_.channels(), batch->data_.height(), batch->data_.width()); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); caffe_copy(batch->datap_.count(), batch->datap_.gpu_data(), top[0]->mutable_gpu_data()+batch->data_.count()); if (this->output_labels_) { // Reshape to loaded labels. vector<int> shape = batch->label_.shape(); CHECK_LT(shape.size(), 2); CHECK_EQ(top[1]->count(), batch->label_.count()*2); shape[0] *= 2; top[1]->Reshape(shape); // Copy the labels. caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); caffe_copy(batch->labelp_.count(), batch->labelp_.gpu_data(), top[1]->mutable_gpu_data()+batch->label_.count()); } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } template <typename Dtype> void MsPrefetchingDataLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { MsBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. top[0]->ReshapeLike(batch->data_); // Copy the data caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { for (int nn = 0; nn < batch->labels_.size(); nn++) { // Reshape to loaded labels. top[nn+1]->ReshapeLike(*batch->labels_[nn]); // Copy the labels. caffe_copy(batch->labels_[nn]->count(), batch->labels_[nn]->gpu_data(), top[nn+1]->mutable_gpu_data()); } } // Ensure the copy is synchronous wrt the host, so that the next batch isn't // copied in meanwhile. CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD(ReidPrefetchingDataLayer); INSTANTIATE_LAYER_GPU_FORWARD(MsPrefetchingDataLayer); } // namespace caffe
b98c76a3040cdf72e29f3e1a610a62e3acffb86b.hip
// !!! This is a file automatically generated by hipify!!!
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/system/hip/execution_policy.h>

#include "math.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>

#include "GELU.h"

namespace cuBERT {

    template <typename T>
    struct gelu_functor {
        __device__ T operator()(const T& x) const {
            float _x = (float) x;
            float _y = _x * 0.5f * (1.0f + erff(_x * sqrtf(0.5f)));
            return T(_y);
        }
    };

    template <typename T>
    __host__ void gelu(size_t N, T *inout_gpu, void *stream) {
        thrust::device_ptr<T> dev_ptr(inout_gpu);
        thrust::transform(thrust::hip::par.on((hipStream_t) stream),
                          dev_ptr, dev_ptr + N, dev_ptr, gelu_functor<T>());
    }

    template __host__ void gelu<float>(size_t N, float *inout_gpu, void *stream);
    template __host__ void gelu<half>(size_t N, half *inout_gpu, void *stream);
}
b98c76a3040cdf72e29f3e1a610a62e3acffb86b.cu
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
#include <thrust/system/cuda/execution_policy.h>

#include "math.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>

#include "GELU.h"

namespace cuBERT {

    template <typename T>
    struct gelu_functor {
        __device__ T operator()(const T& x) const {
            float _x = (float) x;
            float _y = _x * 0.5f * (1.0f + erff(_x * sqrtf(0.5f)));
            return T(_y);
        }
    };

    template <typename T>
    __host__ void gelu(size_t N, T *inout_gpu, void *stream) {
        thrust::device_ptr<T> dev_ptr(inout_gpu);
        thrust::transform(thrust::cuda::par.on((cudaStream_t) stream),
                          dev_ptr, dev_ptr + N, dev_ptr, gelu_functor<T>());
    }

    template __host__ void gelu<float>(size_t N, float *inout_gpu, void *stream);
    template __host__ void gelu<half>(size_t N, half *inout_gpu, void *stream);
}
54b7a3f6f981501a0a12b36306e4ed6363ba53d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 17.05.2018 // @author [email protected] // #include <ops/declarable/helpers/percentile.h> #include <NDArrayFactory.h> #include <helpers/ConstantTadHelper.h> #include <helpers/DebugHelper.h> #include "ResultSet.h" namespace nd4j { namespace ops { namespace helpers { template <typename X> static _CUDA_G void percentileKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, const Nd4jLong numTads, const Nd4jLong tadLength, void *vz, Nd4jLong *zShapeInfo, const Nd4jLong zLength, const Nd4jLong position) { for (int t = blockIdx.x; t < numTads; t += gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[t]; auto z = reinterpret_cast<X*>(vz); // sort tad if (tadLength > 1) { for (int m = 0; m < tadLength; m++) { if (m % 2 == 0) { for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < tadLength) { auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo); auto t1 = shape::getIndexOffset(top, xTadShapeInfo); if (x[t0] > x[t1]) { //swap values X dz0 = x[t0]; x[t0] = x[t1]; x[t1] = dz0; } } } } else { for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < tadLength) { auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo); auto t1 = shape::getIndexOffset(top, xTadShapeInfo); if (x[t0] > x[t1]) { //swap values X dz0 = x[t0]; x[t0] = x[t1]; x[t1] = dz0; } } } } __syncthreads(); } } // saving final value if (threadIdx.x == 0) z[shape::getIndexOffset(t, zShapeInfo)] = x[shape::getIndexOffset(position, xTadShapeInfo)]; __syncthreads(); } } template <typename T> static void _percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axis, const float q, const int interpolation) { const int inputRank = input.rankOf(); if(axis.empty()) for(int i=0; i<inputRank; ++i) axis.push_back(i); else shape::checkDimensions(inputRank, axis); auto tempArray = input.dup(input.ordering()); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(tempArray.getShapeInfo(), axis); auto tadLength = shape::length(packX.primaryShapeInfo()); const float fraction = 1.f - q / 100.; Nd4jLong position = 0; switch(interpolation) { case 0: // lower position = static_cast<Nd4jLong>(math::nd4j_ceil<float,T>((tadLength - 1) * fraction)); break; case 1: // higher position = static_cast<Nd4jLong>(math::nd4j_floor<float,T>((tadLength - 1) * fraction)); break; case 2: // nearest position = static_cast<Nd4jLong>(math::nd4j_round<float,T>((tadLength - 1) * fraction)); break; } position = tadLength - position - 1; hipLaunchKernelGGL(( percentileKernel<T>), dim3(256), dim3(512), 1024, *context->getCudaStream(), 
tempArray.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), packX.numberOfTads(), tadLength, output.specialBuffer(), output.specialShapeInfo(), output.lengthOf(), position); nd4j::DebugHelper::checkErrorCode(context->getCudaStream(), "percentile"); } void percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation) { NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), _percentile, (context, input, output, axises, q, interpolation), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); } BUILD_SINGLE_TEMPLATE(template void _percentile, (nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation), LIBND4J_TYPES); } } }
54b7a3f6f981501a0a12b36306e4ed6363ba53d0.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 17.05.2018 // @author [email protected] // #include <ops/declarable/helpers/percentile.h> #include <NDArrayFactory.h> #include <helpers/ConstantTadHelper.h> #include <helpers/DebugHelper.h> #include "ResultSet.h" namespace nd4j { namespace ops { namespace helpers { template <typename X> static _CUDA_G void percentileKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, const Nd4jLong numTads, const Nd4jLong tadLength, void *vz, Nd4jLong *zShapeInfo, const Nd4jLong zLength, const Nd4jLong position) { for (int t = blockIdx.x; t < numTads; t += gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[t]; auto z = reinterpret_cast<X*>(vz); // sort tad if (tadLength > 1) { for (int m = 0; m < tadLength; m++) { if (m % 2 == 0) { for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) { auto top = 2 * tid + 1; if (top < tadLength) { auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo); auto t1 = shape::getIndexOffset(top, xTadShapeInfo); if (x[t0] > x[t1]) { //swap values X dz0 = x[t0]; x[t0] = x[t1]; x[t1] = dz0; } } } } else { for (int tid = threadIdx.x; tid < tadLength; tid += blockDim.x) { auto top = 2 * tid + 2; if (top < tadLength) { auto t0 = shape::getIndexOffset(top - 1, xTadShapeInfo); auto t1 = shape::getIndexOffset(top, xTadShapeInfo); if (x[t0] > x[t1]) { //swap values X dz0 = x[t0]; x[t0] = x[t1]; x[t1] = dz0; } } } } __syncthreads(); } } // saving final value if (threadIdx.x == 0) z[shape::getIndexOffset(t, zShapeInfo)] = x[shape::getIndexOffset(position, xTadShapeInfo)]; __syncthreads(); } } template <typename T> static void _percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axis, const float q, const int interpolation) { const int inputRank = input.rankOf(); if(axis.empty()) for(int i=0; i<inputRank; ++i) axis.push_back(i); else shape::checkDimensions(inputRank, axis); auto tempArray = input.dup(input.ordering()); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(tempArray.getShapeInfo(), axis); auto tadLength = shape::length(packX.primaryShapeInfo()); const float fraction = 1.f - q / 100.; Nd4jLong position = 0; switch(interpolation) { case 0: // lower position = static_cast<Nd4jLong>(math::nd4j_ceil<float,T>((tadLength - 1) * fraction)); break; case 1: // higher position = static_cast<Nd4jLong>(math::nd4j_floor<float,T>((tadLength - 1) * fraction)); break; case 2: // nearest position = static_cast<Nd4jLong>(math::nd4j_round<float,T>((tadLength - 1) * fraction)); break; } position = tadLength - position - 1; percentileKernel<T><<<256, 512, 1024, *context->getCudaStream()>>>(tempArray.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), packX.numberOfTads(), tadLength, 
output.specialBuffer(), output.specialShapeInfo(), output.lengthOf(), position); nd4j::DebugHelper::checkErrorCode(context->getCudaStream(), "percentile"); } void percentile(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation) { NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), _percentile, (context, input, output, axises, q, interpolation), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); } BUILD_SINGLE_TEMPLATE(template void _percentile, (nd4j::LaunchContext * context, const NDArray& input, NDArray& output, std::vector<int>& axises, const float q, const int interpolation), LIBND4J_TYPES); } } }
b9061cebedaf55c0f1dee2e80f716a0d3d714e5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cos_sim_functor.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { namespace math { template <typename T> __global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x, const T* y, const T* z, const T* dz, const size_t rows, const size_t cols, T* dy) { int grid_size = blockDim.x * gridDim.x; T y_norm_data = y_norm[0]; for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows; row_id += grid_size) { T xy_norm_prod = x_norm[row_id] * y_norm_data; T dz_data = dz[row_id]; T z_data = z[row_id]; const T* x_data = x + cols * row_id; T reciprocal_xy_norm_prod = 1 / xy_norm_prod; T y_norm_square = y_norm_data * y_norm_data; T reciprocal_y_norm_square = 1 / y_norm_square; for (size_t i = 0; i < cols; ++i) { T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod - z_data * y[i] * reciprocal_y_norm_square); platform::CudaAtomicAdd(dy + i, dy_data); } } } template <typename T> struct CosSimDyFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext& ctx, const T* x_norm, const T* y_norm, const T* x, const T* y, const T* z, const T* dz, const size_t rows, const size_t cols, T* dy) const { const int block_size = 512; dim3 threads(block_size, 1); dim3 grid((rows + block_size - 1) / block_size, 1); hipLaunchKernelGGL(( CosSimDyKernel<T>), dim3(grid), dim3(threads), 0, ctx.stream(), x_norm, y_norm, x, y, z, dz, rows, cols, dy); } }; template struct CosSimDyFunctor<phi::GPUContext, float>; template struct CosSimDyFunctor<phi::GPUContext, double>; } // namespace math } // namespace operators } // namespace paddle
b9061cebedaf55c0f1dee2e80f716a0d3d714e5b.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cos_sim_functor.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { namespace math { template <typename T> __global__ void CosSimDyKernel(const T* x_norm, const T* y_norm, const T* x, const T* y, const T* z, const T* dz, const size_t rows, const size_t cols, T* dy) { int grid_size = blockDim.x * gridDim.x; T y_norm_data = y_norm[0]; for (int row_id = blockIdx.x * blockDim.x + threadIdx.x; row_id < rows; row_id += grid_size) { T xy_norm_prod = x_norm[row_id] * y_norm_data; T dz_data = dz[row_id]; T z_data = z[row_id]; const T* x_data = x + cols * row_id; T reciprocal_xy_norm_prod = 1 / xy_norm_prod; T y_norm_square = y_norm_data * y_norm_data; T reciprocal_y_norm_square = 1 / y_norm_square; for (size_t i = 0; i < cols; ++i) { T dy_data = dz_data * (x_data[i] * reciprocal_xy_norm_prod - z_data * y[i] * reciprocal_y_norm_square); platform::CudaAtomicAdd(dy + i, dy_data); } } } template <typename T> struct CosSimDyFunctor<phi::GPUContext, T> { void operator()(const phi::GPUContext& ctx, const T* x_norm, const T* y_norm, const T* x, const T* y, const T* z, const T* dz, const size_t rows, const size_t cols, T* dy) const { const int block_size = 512; dim3 threads(block_size, 1); dim3 grid((rows + block_size - 1) / block_size, 1); CosSimDyKernel<T><<<grid, threads, 0, ctx.stream()>>>( x_norm, y_norm, x, y, z, dz, rows, cols, dy); } }; template struct CosSimDyFunctor<phi::GPUContext, float>; template struct CosSimDyFunctor<phi::GPUContext, double>; } // namespace math } // namespace operators } // namespace paddle
e6852ecafe4a6055f369b37070692afdae31b660.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "energy_accumulation.hpp" #include "gpu_utils.cuh" #include "k_nonbonded_pair_list.cuh" #include "kernels/kernel_utils.cuh" #include "math_utils.cuh" #include "nonbonded_pair_list.hpp" #include <stdexcept> #include <vector> namespace timemachine { template <typename RealType, bool Negated> NonbondedPairList<RealType, Negated>::NonbondedPairList( const std::vector<int> &pair_idxs, // [M, 2] const std::vector<double> &scales, // [M, 2] const double beta, const double cutoff) : M_(pair_idxs.size() / 2), beta_(beta), cutoff_(cutoff) { if (pair_idxs.size() % 2 != 0) { throw std::runtime_error("pair_idxs.size() must be even, but got " + std::to_string(pair_idxs.size())); } for (int i = 0; i < M_; i++) { auto src = pair_idxs[i * 2 + 0]; auto dst = pair_idxs[i * 2 + 1]; if (src == dst) { throw std::runtime_error( "illegal pair with src == dst: " + std::to_string(src) + ", " + std::to_string(dst)); } } if (scales.size() / 2 != M_) { throw std::runtime_error( "expected same number of pairs and scale tuples, but got " + std::to_string(M_) + " != " + std::to_string(scales.size() / 2)); } cudaSafeMalloc(&d_u_buffer_, M_ * sizeof(*d_u_buffer_)); cudaSafeMalloc(&d_pair_idxs_, M_ * 2 * sizeof(*d_pair_idxs_)); gpuErrchk(hipMemcpy(d_pair_idxs_, &pair_idxs[0], M_ * 2 * sizeof(*d_pair_idxs_), hipMemcpyHostToDevice)); cudaSafeMalloc(&d_scales_, M_ * 2 * sizeof(*d_scales_)); gpuErrchk(hipMemcpy(d_scales_, &scales[0], M_ * 2 * sizeof(*d_scales_), hipMemcpyHostToDevice)); }; template <typename RealType, bool Negated> NonbondedPairList<RealType, Negated>::~NonbondedPairList() { gpuErrchk(hipFree(d_pair_idxs_)); gpuErrchk(hipFree(d_scales_)); gpuErrchk(hipFree(d_u_buffer_)); }; template <typename RealType, bool Negated> void NonbondedPairList<RealType, Negated>::execute_device( const int N, const int P, const double *d_x, const double *d_p, const double *d_box, unsigned long long *d_du_dx, unsigned long long *d_du_dp, __int128 *d_u, hipStream_t stream) { if (M_ > 0) { const int tpb = DEFAULT_THREADS_PER_BLOCK; const int num_blocks_pairs = ceil_divide(M_, tpb); hipLaunchKernelGGL(( k_nonbonded_pair_list<RealType, Negated>), dim3(num_blocks_pairs), dim3(tpb), 0, stream, M_, d_x, d_p, d_box, d_pair_idxs_, d_scales_, beta_, cutoff_, d_du_dx, d_du_dp, d_u == nullptr ? 
nullptr : d_u_buffer_); gpuErrchk(hipPeekAtLastError()); if (d_u) { accumulate_energy(M_, d_u_buffer_, d_u, stream); } } } // TODO: this implementation is duplicated from NonbondedAllPairs template <typename RealType, bool Negated> void NonbondedPairList<RealType, Negated>::du_dp_fixed_to_float( const int N, const int P, const unsigned long long *du_dp, double *du_dp_float) { for (int i = 0; i < N; i++) { const int idx = i * PARAMS_PER_ATOM; const int idx_charge = idx + PARAM_OFFSET_CHARGE; const int idx_sig = idx + PARAM_OFFSET_SIG; const int idx_eps = idx + PARAM_OFFSET_EPS; const int idx_w = idx + PARAM_OFFSET_W; du_dp_float[idx_charge] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DCHARGE>(du_dp[idx_charge]); du_dp_float[idx_sig] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DSIG>(du_dp[idx_sig]); du_dp_float[idx_eps] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DEPS>(du_dp[idx_eps]); du_dp_float[idx_w] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DW>(du_dp[idx_w]); } } template class NonbondedPairList<double, true>; template class NonbondedPairList<float, true>; template class NonbondedPairList<double, false>; template class NonbondedPairList<float, false>; } // namespace timemachine
e6852ecafe4a6055f369b37070692afdae31b660.cu
#include "energy_accumulation.hpp" #include "gpu_utils.cuh" #include "k_nonbonded_pair_list.cuh" #include "kernels/kernel_utils.cuh" #include "math_utils.cuh" #include "nonbonded_pair_list.hpp" #include <stdexcept> #include <vector> namespace timemachine { template <typename RealType, bool Negated> NonbondedPairList<RealType, Negated>::NonbondedPairList( const std::vector<int> &pair_idxs, // [M, 2] const std::vector<double> &scales, // [M, 2] const double beta, const double cutoff) : M_(pair_idxs.size() / 2), beta_(beta), cutoff_(cutoff) { if (pair_idxs.size() % 2 != 0) { throw std::runtime_error("pair_idxs.size() must be even, but got " + std::to_string(pair_idxs.size())); } for (int i = 0; i < M_; i++) { auto src = pair_idxs[i * 2 + 0]; auto dst = pair_idxs[i * 2 + 1]; if (src == dst) { throw std::runtime_error( "illegal pair with src == dst: " + std::to_string(src) + ", " + std::to_string(dst)); } } if (scales.size() / 2 != M_) { throw std::runtime_error( "expected same number of pairs and scale tuples, but got " + std::to_string(M_) + " != " + std::to_string(scales.size() / 2)); } cudaSafeMalloc(&d_u_buffer_, M_ * sizeof(*d_u_buffer_)); cudaSafeMalloc(&d_pair_idxs_, M_ * 2 * sizeof(*d_pair_idxs_)); gpuErrchk(cudaMemcpy(d_pair_idxs_, &pair_idxs[0], M_ * 2 * sizeof(*d_pair_idxs_), cudaMemcpyHostToDevice)); cudaSafeMalloc(&d_scales_, M_ * 2 * sizeof(*d_scales_)); gpuErrchk(cudaMemcpy(d_scales_, &scales[0], M_ * 2 * sizeof(*d_scales_), cudaMemcpyHostToDevice)); }; template <typename RealType, bool Negated> NonbondedPairList<RealType, Negated>::~NonbondedPairList() { gpuErrchk(cudaFree(d_pair_idxs_)); gpuErrchk(cudaFree(d_scales_)); gpuErrchk(cudaFree(d_u_buffer_)); }; template <typename RealType, bool Negated> void NonbondedPairList<RealType, Negated>::execute_device( const int N, const int P, const double *d_x, const double *d_p, const double *d_box, unsigned long long *d_du_dx, unsigned long long *d_du_dp, __int128 *d_u, cudaStream_t stream) { if (M_ > 0) { const int tpb = DEFAULT_THREADS_PER_BLOCK; const int num_blocks_pairs = ceil_divide(M_, tpb); k_nonbonded_pair_list<RealType, Negated><<<num_blocks_pairs, tpb, 0, stream>>>( M_, d_x, d_p, d_box, d_pair_idxs_, d_scales_, beta_, cutoff_, d_du_dx, d_du_dp, d_u == nullptr ? nullptr : d_u_buffer_); gpuErrchk(cudaPeekAtLastError()); if (d_u) { accumulate_energy(M_, d_u_buffer_, d_u, stream); } } } // TODO: this implementation is duplicated from NonbondedAllPairs template <typename RealType, bool Negated> void NonbondedPairList<RealType, Negated>::du_dp_fixed_to_float( const int N, const int P, const unsigned long long *du_dp, double *du_dp_float) { for (int i = 0; i < N; i++) { const int idx = i * PARAMS_PER_ATOM; const int idx_charge = idx + PARAM_OFFSET_CHARGE; const int idx_sig = idx + PARAM_OFFSET_SIG; const int idx_eps = idx + PARAM_OFFSET_EPS; const int idx_w = idx + PARAM_OFFSET_W; du_dp_float[idx_charge] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DCHARGE>(du_dp[idx_charge]); du_dp_float[idx_sig] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DSIG>(du_dp[idx_sig]); du_dp_float[idx_eps] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DEPS>(du_dp[idx_eps]); du_dp_float[idx_w] = FIXED_TO_FLOAT_DU_DP<double, FIXED_EXPONENT_DU_DW>(du_dp[idx_w]); } } template class NonbondedPairList<double, true>; template class NonbondedPairList<float, true>; template class NonbondedPairList<double, false>; template class NonbondedPairList<float, false>; } // namespace timemachine
cbc211a9b4b285bf075a1b91eebeb8bed9e682ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // Original license: Apache 2.0 // clang-format off // modify from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <ATen/ATen.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <math.h> #include <stdio.h> #include <THH/THHAtomics.cuh> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) 
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int 
deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * 
height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } namespace detectron2 { void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; 
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } } // namespace detectron2 template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t 
dmcn_get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - 
pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + 
h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * 
height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear( data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * // height_col + h) * width_col + w], mask_req, mval); grad_mask [(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } namespace detectron2 { void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } } } // namespace detectron2
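The host wrappers above are meant to be driven from a deformable-convolution forward/backward pass. Below is a minimal, illustrative sketch (not part of the upstream file) of how detectron2::deformable_im2col could be invoked for a single 3x3 kernel; the tensor shapes, the zero-offset initialisation, and the example_deformable_im2col helper are assumptions inferred from the index arithmetic in deformable_im2col_gpu_kernel, not code from the original repository.

// Illustrative usage sketch (assumption: shapes inferred from the indexing in
// deformable_im2col_gpu_kernel; this helper is not part of the upstream file).
#include <ATen/ATen.h>

namespace detectron2 {
void deformable_im2col(
    const at::Tensor data_im, const at::Tensor data_offset, const int channels,
    const int height, const int width, const int ksize_h, const int ksize_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, const int parallel_imgs,
    const int deformable_group, at::Tensor data_col);
}

void example_deformable_im2col() {
  const int n = 2, c = 8, h = 32, w = 32;                 // parallel_imgs, channels, spatial size
  const int kh = 3, kw = 3, pad = 1, stride = 1, dil = 1; // 3x3 kernel, "same" padding
  const int group = 1;                                    // one deformable group
  // Output spatial size, using the same formula as the host wrapper above.
  const int h_col = (h + 2 * pad - (dil * (kh - 1) + 1)) / stride + 1;
  const int w_col = (w + 2 * pad - (dil * (kw - 1) + 1)) / stride + 1;

  auto opts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
  auto im = at::randn({n, c, h, w}, opts);
  // Two offsets (dy, dx) per kernel tap and deformable group; all-zero offsets
  // make the call behave like a plain im2col, which is a handy sanity check.
  auto offset = at::zeros({n, group * 2 * kh * kw, h_col, w_col}, opts);
  // Column buffer laid out as (c * kh * kw, n * h_col * w_col), matching the
  // data_col_ptr indexing used inside the kernel.
  auto col = at::zeros({c * kh * kw, n * h_col * w_col}, opts);

  detectron2::deformable_im2col(
      im, offset, c, h, w, kh, kw, pad, pad, stride, stride, dil, dil,
      n, group, col);
}

With zero offsets the column buffer can be compared against an ordinary im2col/unfold of the same input, which is a convenient way to validate a build of these kernels before exercising learned offsets.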
cbc211a9b4b285bf075a1b91eebeb8bed9e682ee.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // Original license: Apache 2.0 // clang-format off // modify from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <ATen/ATen.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <math.h> #include <stdio.h> #include <THC/THCAtomics.cuh> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low 
+ 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int 
height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; 
const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } namespace detectron2 { void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::cuda::CUDAGuard device_guard(data_im.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = 
at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } } // namespace detectron2 template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { 
// empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * 
num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = 
data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = 
data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear( data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * // height_col + h) * width_col + w], mask_req, mval); grad_mask [(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } namespace detectron2 { void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; at::cuda::CUDAGuard device_guard(data_im.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const 
scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); modulated_deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } } } // namespace detectron2
909f341de79e41050d309012b626461088f72353.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "getSufficientComponentNum.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const double *eigenvalues = NULL;
            hipMalloc(&eigenvalues, XSIZE*YSIZE);
            std::size_t *componentNum = NULL;
            hipMalloc(&componentNum, XSIZE*YSIZE);
            std::size_t eigenRows = 1;
            double epsilon = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( getSufficientComponentNum), dim3(gridBlock),dim3(threadBlock), 0, 0, eigenvalues,componentNum,eigenRows,epsilon);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( getSufficientComponentNum), dim3(gridBlock),dim3(threadBlock), 0, 0, eigenvalues,componentNum,eigenRows,epsilon);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( getSufficientComponentNum), dim3(gridBlock),dim3(threadBlock), 0, 0, eigenvalues,componentNum,eigenRows,epsilon);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
909f341de79e41050d309012b626461088f72353.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "getSufficientComponentNum.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const double *eigenvalues = NULL;
            cudaMalloc(&eigenvalues, XSIZE*YSIZE);
            std::size_t *componentNum = NULL;
            cudaMalloc(&componentNum, XSIZE*YSIZE);
            std::size_t eigenRows = 1;
            double epsilon = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            getSufficientComponentNum<<<gridBlock,threadBlock>>>(eigenvalues,componentNum,eigenRows,epsilon);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                getSufficientComponentNum<<<gridBlock,threadBlock>>>(eigenvalues,componentNum,eigenRows,epsilon);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                getSufficientComponentNum<<<gridBlock,threadBlock>>>(eigenvalues,componentNum,eigenRows,epsilon);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
949d3d0d247d03b19bb5fae707f702316e93887d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/lrn.h> #include <Status.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { template <typename T> static _CUDA_G void lrnKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) { extern __shared__ char sharedChar[]; __shared__ T* shared; if (threadIdx.x == 0) shared = reinterpret_cast<T*>(sharedChar); __syncthreads(); auto xEws = shape::elementWiseStride(xTadShapeInfo); auto zEws = shape::elementWiseStride(zTadShapeInfo); auto xOrder = shape::order(xTadShapeInfo); auto zOrder = shape::order(zTadShapeInfo); const T tbias = static_cast<T>(bias); const T tbeta = static_cast<T>(beta); const T talpha = static_cast<T>(alpha); for (uint i = blockIdx.x; i < numTads; i += gridDim.x) { auto x = reinterpret_cast<T*>(vx) + xTadOffsets[i]; auto z = reinterpret_cast<T*>(vz) + zTadOffsets[i]; // load everything into shared memory shared[threadIdx.x] = x[threadIdx.x * xEws]; __syncthreads(); const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth); const uint last = depth + threadIdx.x + 1; const uint end = nd4j::math::nd4j_min<int>(last, tadLength); T prev = 0.; for (int s = begin; s < end; s++) prev = prev + shared[s] * shared[s]; z[threadIdx.x * zEws] = shared[threadIdx.x] / nd4j::math::nd4j_pow<T, T, T>(tbias + alpha * prev, tbeta); } } template <typename X, typename Z> static _CUDA_G void lrnBPKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) { extern __shared__ char sharedChar[]; __shared__ X* sharedX; __shared__ Z* sharedY; if (threadIdx.x == 0) { sharedX = reinterpret_cast<X*>(sharedChar); sharedY = reinterpret_cast<Z*>(sharedX + blockDim.x); } __syncthreads(); auto xEws = shape::elementWiseStride(xTadShapeInfo); auto zEws = shape::elementWiseStride(zTadShapeInfo); auto xOrder = shape::order(xTadShapeInfo); auto zOrder = shape::order(zTadShapeInfo); const Z tbias = static_cast<Z>(bias); const Z tbeta = static_cast<Z>(beta); const Z talpha = static_cast<Z>(alpha); const Z coeff = talpha * tbeta; for (uint i = blockIdx.x; i < numTads; i += gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; auto z = reinterpret_cast<Z*>(vz) + zTadOffsets[i]; const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth); const uint last = depth + threadIdx.x + 1; const uint end = nd4j::math::nd4j_min<int>(last, tadLength); // load everything into shared 
memory sharedX[threadIdx.x] = x[threadIdx.x * xEws]; sharedY[threadIdx.x] = 0.f; __syncthreads(); for (int s = begin; s < end; s++) sharedY[threadIdx.x] = sharedY[threadIdx.x] + sharedX[s] * sharedX[s]; __syncthreads(); Z factor[1024]; Z init = tbias + talpha * sharedY[threadIdx.x]; Z prev = 0.f; for (uint s = begin; s < end; ++s) { factor[s] = nd4j::math::nd4j_pow<Z, Z, Z>(tbias + talpha * sharedY[s], -tbeta - 1); prev = prev + sharedX[s] * factor[s]; } z[threadIdx.x * zEws] = factor[threadIdx.x] * init - 2 * sharedX[threadIdx.x] * coeff * prev; } } template <typename X, typename Z> static void lrnBP_(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) { auto rank = input.rankOf(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {rank - 1}); auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), {rank - 1}); const auto tadLength = shape::length(packX.primaryShapeInfo()); const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads()); const int numThreads = tadLength; if (tadLength > 1024 || tadLength < 1) throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet"); hipLaunchKernelGGL(( lrnBPKernel<X, Z>), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(X) + numThreads * sizeof(Z) + 1024, *block.launchContext()->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradI.specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta); gradI.tickWriteDevice(); gradI *= gradO; } void lrnBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) { input.syncToDevice(); gradO.syncToDevice(); BUILD_DOUBLE_SELECTOR(input.dataType(), gradO.dataType(), lrnBP_, (block, input, gradO, gradI, depth, bias, alpha, beta), FLOAT_TYPES, FLOAT_TYPES); gradI.tickWriteDevice(); } template <typename T> static void lrnFunctor_(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) { auto rank = input->rankOf(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {rank - 1}); auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {rank - 1}); const auto tadLength = shape::length(packX.primaryShapeInfo()); const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads()); const int numThreads = tadLength; if (tadLength > 1024 || tadLength < 1) throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet"); hipLaunchKernelGGL(( lrnKernel<T>), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(T), *block.launchContext()->getCudaStream(), input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta); } int lrnFunctor(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) { input->syncToDevice(); BUILD_SINGLE_SELECTOR(input->dataType(), lrnFunctor_, (block, input, output, depth, bias, alpha, beta), FLOAT_TYPES); output->tickWriteDevice(); return Status::OK(); } } } }
949d3d0d247d03b19bb5fae707f702316e93887d.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <ops/declarable/helpers/lrn.h> #include <Status.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { template <typename T> static _CUDA_G void lrnKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) { extern __shared__ char sharedChar[]; __shared__ T* shared; if (threadIdx.x == 0) shared = reinterpret_cast<T*>(sharedChar); __syncthreads(); auto xEws = shape::elementWiseStride(xTadShapeInfo); auto zEws = shape::elementWiseStride(zTadShapeInfo); auto xOrder = shape::order(xTadShapeInfo); auto zOrder = shape::order(zTadShapeInfo); const T tbias = static_cast<T>(bias); const T tbeta = static_cast<T>(beta); const T talpha = static_cast<T>(alpha); for (uint i = blockIdx.x; i < numTads; i += gridDim.x) { auto x = reinterpret_cast<T*>(vx) + xTadOffsets[i]; auto z = reinterpret_cast<T*>(vz) + zTadOffsets[i]; // load everything into shared memory shared[threadIdx.x] = x[threadIdx.x * xEws]; __syncthreads(); const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth); const uint last = depth + threadIdx.x + 1; const uint end = nd4j::math::nd4j_min<int>(last, tadLength); T prev = 0.; for (int s = begin; s < end; s++) prev = prev + shared[s] * shared[s]; z[threadIdx.x * zEws] = shared[threadIdx.x] / nd4j::math::nd4j_pow<T, T, T>(tbias + alpha * prev, tbeta); } } template <typename X, typename Z> static _CUDA_G void lrnBPKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) { extern __shared__ char sharedChar[]; __shared__ X* sharedX; __shared__ Z* sharedY; if (threadIdx.x == 0) { sharedX = reinterpret_cast<X*>(sharedChar); sharedY = reinterpret_cast<Z*>(sharedX + blockDim.x); } __syncthreads(); auto xEws = shape::elementWiseStride(xTadShapeInfo); auto zEws = shape::elementWiseStride(zTadShapeInfo); auto xOrder = shape::order(xTadShapeInfo); auto zOrder = shape::order(zTadShapeInfo); const Z tbias = static_cast<Z>(bias); const Z tbeta = static_cast<Z>(beta); const Z talpha = static_cast<Z>(alpha); const Z coeff = talpha * tbeta; for (uint i = blockIdx.x; i < numTads; i += gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; auto z = reinterpret_cast<Z*>(vz) + zTadOffsets[i]; const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth); const uint last = depth + threadIdx.x + 1; const uint end = nd4j::math::nd4j_min<int>(last, tadLength); // load everything into shared memory sharedX[threadIdx.x] = x[threadIdx.x * xEws]; sharedY[threadIdx.x] = 0.f; 
__syncthreads(); for (int s = begin; s < end; s++) sharedY[threadIdx.x] = sharedY[threadIdx.x] + sharedX[s] * sharedX[s]; __syncthreads(); Z factor[1024]; Z init = tbias + talpha * sharedY[threadIdx.x]; Z prev = 0.f; for (uint s = begin; s < end; ++s) { factor[s] = nd4j::math::nd4j_pow<Z, Z, Z>(tbias + talpha * sharedY[s], -tbeta - 1); prev = prev + sharedX[s] * factor[s]; } z[threadIdx.x * zEws] = factor[threadIdx.x] * init - 2 * sharedX[threadIdx.x] * coeff * prev; } } template <typename X, typename Z> static void lrnBP_(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) { auto rank = input.rankOf(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {rank - 1}); auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), {rank - 1}); const auto tadLength = shape::length(packX.primaryShapeInfo()); const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads()); const int numThreads = tadLength; if (tadLength > 1024 || tadLength < 1) throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet"); lrnBPKernel<X, Z><<<numBlocks, numThreads, numThreads * sizeof(X) + numThreads * sizeof(Z) + 1024, *block.launchContext()->getCudaStream()>>>(input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradI.specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta); gradI.tickWriteDevice(); gradI *= gradO; } void lrnBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) { input.syncToDevice(); gradO.syncToDevice(); BUILD_DOUBLE_SELECTOR(input.dataType(), gradO.dataType(), lrnBP_, (block, input, gradO, gradI, depth, bias, alpha, beta), FLOAT_TYPES, FLOAT_TYPES); gradI.tickWriteDevice(); } template <typename T> static void lrnFunctor_(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) { auto rank = input->rankOf(); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {rank - 1}); auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {rank - 1}); const auto tadLength = shape::length(packX.primaryShapeInfo()); const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads()); const int numThreads = tadLength; if (tadLength > 1024 || tadLength < 1) throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet"); lrnKernel<T><<<numBlocks, numThreads, numThreads * sizeof(T), *block.launchContext()->getCudaStream()>>>(input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta); } int lrnFunctor(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) { input->syncToDevice(); BUILD_SINGLE_SELECTOR(input->dataType(), lrnFunctor_, (block, input, output, depth, bias, alpha, beta), FLOAT_TYPES); output->tickWriteDevice(); return Status::OK(); } } } }
14acf8feaf566b2d79cc3dc4cc9c5169658ff269.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void matadd( int * m0, int * m1, std::size_t w, std::size_t h ) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if( i < w && j < h ) m0[ i * w + j ] += m1[ i * w + j ];// i * w + j; }
14acf8feaf566b2d79cc3dc4cc9c5169658ff269.cu
#include "includes.h" __global__ void matadd( int * m0, int * m1, std::size_t w, std::size_t h ) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if( i < w && j < h ) m0[ i * w + j ] += m1[ i * w + j ];// i * w + j; }
a6ce0cdcd1de65fa12dd06479dd8e68242eeb68f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Fix2d.h" #include "State.h" void __global__ compute_cu(real4 *xs, real4 *vs, real4 *fs, int nAtoms) { int idx = GETIDX(); if (idx < nAtoms) { xs[idx].z = 0; vs[idx].z = 0; fs[idx].z = 0; } } //THIS NEEDS TO GO LAST void Fix2d::compute(int virialMode) { //going to zero z in xs, vs, fs int nAtoms = state->atoms.size(); GPUData &gpd = state->gpd; hipLaunchKernelGGL(( compute_cu), dim3(NBLOCK(nAtoms)), dim3(PERBLOCK), 0, 0, gpd.xs.getDevData(), gpd.vs.getDevData(), gpd.fs.getDevData(), nAtoms); } void export_Fix2d() { boost::python::class_<Fix2d, boost::shared_ptr<Fix2d>, boost::python::bases<Fix> >( "Fix2d", boost::python::init<boost::shared_ptr<State>, std::string, int>( boost::python::args("state", "handle", "applyEvery")) ) ; }
a6ce0cdcd1de65fa12dd06479dd8e68242eeb68f.cu
#include "Fix2d.h" #include "State.h" void __global__ compute_cu(real4 *xs, real4 *vs, real4 *fs, int nAtoms) { int idx = GETIDX(); if (idx < nAtoms) { xs[idx].z = 0; vs[idx].z = 0; fs[idx].z = 0; } } //THIS NEEDS TO GO LAST void Fix2d::compute(int virialMode) { //going to zero z in xs, vs, fs int nAtoms = state->atoms.size(); GPUData &gpd = state->gpd; compute_cu<<<NBLOCK(nAtoms), PERBLOCK>>>(gpd.xs.getDevData(), gpd.vs.getDevData(), gpd.fs.getDevData(), nAtoms); } void export_Fix2d() { boost::python::class_<Fix2d, boost::shared_ptr<Fix2d>, boost::python::bases<Fix> >( "Fix2d", boost::python::init<boost::shared_ptr<State>, std::string, int>( boost::python::args("state", "handle", "applyEvery")) ) ; }
8df3a93feeac5c709c5093921eb15eda025f7090.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "THHTensor.hpp" #include "TH/THHalf.h" #include "THHHalfAutoNumerics.cuh" #include "THHAtomics.cuh" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function subsamples an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output, 1D weight, 1D bias */ template <typename Dtype, typename Acctype> __global__ void subsample(Dtype *input, Dtype *output, Dtype *weight, Dtype *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // Get the good mask for (k,i) (k out, i in) Dtype the_weight = weight[k]; // Initialize to the bias Dtype the_bias = bias[k]; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... Dtype *ptr_input = input + yy*dH*input_w + xx*dW; Dtype *ptr_output = output + yy*output_w + xx; Acctype sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = ScalarConvert<Acctype, Dtype>::to(the_weight*sum + the_bias); } } } /* * Description: * this function computes the gradWeight from input and gradOutput */ template <typename Dtype, typename Acctype> __global__ void subgradweight(Dtype *input, Dtype *gradOutput, Dtype *gradWeight, Dtype *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; input = input + i*input_w*input_h; // thread ID int tid = blockDim.x*threadIdx.y + threadIdx.x; // create array to hold partial sums __shared__ Acctype sums[CUDA_MAX_THREADS]; sums[tid] = 0; // compute partial sums for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { Dtype *ptr_input = input + yy*dH*input_w + xx*dW; Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx; Dtype z = *ptr_gradOutput; int64_t kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { sums[tid] += z * ptr_input[kx]; } ptr_input += input_w; } } } __syncthreads(); // reduce: accumulate all partial sums to produce final gradWeight if ((threadIdx.x == 0) && (threadIdx.y == 0)) { Acctype scaledSums = Acctype(0); for(int i = 0; i < blockDim.x*blockDim.y; i++) { scaledSums += scale*sums[i]; } gradWeight[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums); } __syncthreads(); // compute gradBias sums[tid] = 0; for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) 
{ sums[tid] += gradOutput[i]; } __syncthreads(); // reduce gradBias if ((threadIdx.x == 0) && (threadIdx.y == 0)) { Acctype scaledSums = Acctype(0); for (int i=0; i<(blockDim.x*blockDim.y); i++) { scaledSums += scale*sums[i]; } gradBias[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums); } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename Dtype> __global__ void subgradinput(Dtype *gradInput, Dtype *gradOutput, Dtype *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight Dtype the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx; Dtype z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { // FIXME: should this be done at accreal precision? ptr_gradInput[kx] += z; } ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename Dtype> __global__ void subgradinputAtomic(Dtype *gradInput, Dtype *gradOutput, Dtype *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight Dtype the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx; Dtype z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { // FIXME: should this be done at accreal precision? atomicAdd(&(ptr_gradInput[kx]), z); } ptr_gradInput += input_w; } } } } #include "generic/SpatialSubSampling.cu" #include "THHGenerateFloatTypes.h" #undef CUDA_MAX_THREADS
8df3a93feeac5c709c5093921eb15eda025f7090.cu
#include "THCUNN.h" #include "THCTensor.hpp" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include "THCAtomics.cuh" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function subsamples an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output, 1D weight, 1D bias */ template <typename Dtype, typename Acctype> __global__ void subsample(Dtype *input, Dtype *output, Dtype *weight, Dtype *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // Get the good mask for (k,i) (k out, i in) Dtype the_weight = weight[k]; // Initialize to the bias Dtype the_bias = bias[k]; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... Dtype *ptr_input = input + yy*dH*input_w + xx*dW; Dtype *ptr_output = output + yy*output_w + xx; Acctype sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = ScalarConvert<Acctype, Dtype>::to(the_weight*sum + the_bias); } } } /* * Description: * this function computes the gradWeight from input and gradOutput */ template <typename Dtype, typename Acctype> __global__ void subgradweight(Dtype *input, Dtype *gradOutput, Dtype *gradWeight, Dtype *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; input = input + i*input_w*input_h; // thread ID int tid = blockDim.x*threadIdx.y + threadIdx.x; // create array to hold partial sums __shared__ Acctype sums[CUDA_MAX_THREADS]; sums[tid] = 0; // compute partial sums for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { Dtype *ptr_input = input + yy*dH*input_w + xx*dW; Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx; Dtype z = *ptr_gradOutput; int64_t kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { sums[tid] += z * ptr_input[kx]; } ptr_input += input_w; } } } __syncthreads(); // reduce: accumulate all partial sums to produce final gradWeight if ((threadIdx.x == 0) && (threadIdx.y == 0)) { Acctype scaledSums = Acctype(0); for(int i = 0; i < blockDim.x*blockDim.y; i++) { scaledSums += scale*sums[i]; } gradWeight[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums); } __syncthreads(); // compute gradBias sums[tid] = 0; for (int i=tid; i<output_w*output_h; i+=(blockDim.x*blockDim.y)) { sums[tid] += gradOutput[i]; } __syncthreads(); // reduce gradBias if ((threadIdx.x == 
0) && (threadIdx.y == 0)) { Acctype scaledSums = Acctype(0); for (int i=0; i<(blockDim.x*blockDim.y); i++) { scaledSums += scale*sums[i]; } gradBias[k] += ScalarConvert<Acctype, Dtype>::to(scaledSums); } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename Dtype> __global__ void subgradinput(Dtype *gradInput, Dtype *gradOutput, Dtype *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight Dtype the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx; Dtype z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { // FIXME: should this be done at accreal precision? ptr_gradInput[kx] += z; } ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename Dtype> __global__ void subgradinputAtomic(Dtype *gradInput, Dtype *gradOutput, Dtype *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int k = blockIdx.x % input_n; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight Dtype the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { Dtype *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; Dtype *ptr_gradOutput = gradOutput + yy*output_w + xx; Dtype z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { // FIXME: should this be done at accreal precision? atomicAdd(&(ptr_gradInput[kx]), z); } ptr_gradInput += input_w; } } } } #include "generic/SpatialSubSampling.cu" #include "THCGenerateFloatTypes.h" #undef CUDA_MAX_THREADS
6788f0a8b20d4041e41b2c182cfd8b18d0f7ef84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_paraMeters.h" #include<iostream> #include<iomanip> #include<stdlib.h> #include<stdio.h> #include<assert.h> #include <cusolverDn.h> #include <rocblas.h> #include <hip/hip_runtime_api.h> #include "Utilities.cuh" #define BLOCK_SIZE 32 /***************/ /* COPY KERNEL */ /***************/ __global__ void copy_kernel(const double * __restrict d_in1, double * __restrict d_out1, const double * __restrict d_in2, double * __restrict d_out2, const int M, const int N) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if ((i < N) && (j < N)) { d_out1[j * N + i] = d_in1[j * M + i]; d_out2[j * N + i] = d_in2[j * M + i]; } } /********/ /* MAIN */ /********/ int main(){ // --- ASSUMPTION Nrows >= Ncols const int Nrows = 7; const int Ncols = 5; // --- cuSOLVE input/output parameters/arrays int work_size = 0; int *devInfo; gpuErrchk(hipMalloc(&devInfo, sizeof(int))); // --- CUDA solver initialization hipsolverDnHandle_t solver_handle; hipsolverDnCreate(&solver_handle); // --- CUBLAS initialization hipblasHandle_t cublas_handle; cublasSafeCall(hipblasCreate(&cublas_handle)); // --- Setting the host, Nrows x Ncols matrix double *h_A = (double *)malloc(Nrows * Ncols * sizeof(double)); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Ncols; i++) h_A[j + i*Nrows] = (i + j*j) * sqrt((double)(i + j)); // --- Setting the device matrix and moving the host matrix to the device double *d_A; gpuErrchk(hipMalloc(&d_A, Nrows * Ncols * sizeof(double))); gpuErrchk(hipMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), hipMemcpyHostToDevice)); // --- CUDA QR initialization double *d_TAU; gpuErrchk(hipMalloc((void**)&d_TAU, min(Nrows, Ncols) * sizeof(double))); cusolveSafeCall(hipsolverDnDgeqrf_bufferSize(solver_handle, Nrows, Ncols, d_A, Nrows, &work_size)); double *work; gpuErrchk(hipMalloc(&work, work_size * sizeof(double))); // --- CUDA GERF execution cusolveSafeCall(hipsolverDnDgeqrf(solver_handle, Nrows, Ncols, d_A, Nrows, d_TAU, work, work_size, devInfo)); int devInfo_h = 0; gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost)); if (devInfo_h != 0) std::cout << "Unsuccessful gerf execution\n\n"; // --- At this point, the upper triangular part of A contains the elements of R. Showing this. gpuErrchk(hipMemcpy(h_A, d_A, Nrows * Ncols * sizeof(double), hipMemcpyDeviceToHost)); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Ncols; i++) if (i >= j) printf("R[%i, %i] = %f\n", j, i, h_A[j + i*Nrows]); // --- Initializing the output Q matrix (Of course, this step could be done by a kernel function directly on the device) double *h_Q = (double *)malloc(Nrows * Nrows * sizeof(double)); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Nrows; i++) if (j == i) h_Q[j + i*Nrows] = 1.; else h_Q[j + i*Nrows] = 0.; double *d_Q; gpuErrchk(hipMalloc(&d_Q, Nrows * Nrows * sizeof(double))); gpuErrchk(hipMemcpy(d_Q, h_Q, Nrows * Nrows * sizeof(double), hipMemcpyHostToDevice)); // --- CUDA QR execution cusolveSafeCall(hipsolverDnDormqr(solver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_Q, Nrows, work, work_size, devInfo)); // --- At this point, d_Q contains the elements of Q. Showing this. 
gpuErrchk(hipMemcpy(h_Q, d_Q, Nrows * Nrows * sizeof(double), hipMemcpyDeviceToHost)); printf("\n\n"); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Nrows; i++) printf("Q[%i, %i] = %f\n", j, i, h_Q[j + i*Nrows]); // --- Initializing the data matrix C (Of course, this step could be done by a kernel function directly on the device). // --- Notice that, in this case, only the first column of C contains actual data, the others being empty (zeroed). However, cuBLAS trsm // has the capability of solving triangular linear systems with multiple right hand sides. double *h_C = (double *)calloc(Nrows * Nrows, sizeof(double)); for(int j = 0; j < Nrows; j++) h_C[j] = 1.; double *d_C; gpuErrchk(hipMalloc(&d_C, Nrows * Nrows * sizeof(double))); gpuErrchk(hipMemcpy(d_C, h_C, Nrows * Nrows * sizeof(double), hipMemcpyHostToDevice)); // --- CUDA QR execution cusolveSafeCall(hipsolverDnDormqr(solver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_T, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_C, Nrows, work, work_size, devInfo)); // --- At this point, d_C contains the elements of Q^T * C, where C is the data vector. Showing this. // --- According to the above, only the first column of d_C makes sense. gpuErrchk(hipMemcpy(h_C, d_C, Nrows * Nrows * sizeof(double), hipMemcpyDeviceToHost)); printf("\n\n"); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Nrows; i++) printf("C[%i, %i] = %f\n", j, i, h_C[j + i*Nrows]); // --- Reducing the linear system size double *d_R; gpuErrchk(hipMalloc(&d_R, Ncols * Ncols * sizeof(double))); double *h_B = (double *)malloc(Ncols * Ncols * sizeof(double)); double *d_B; gpuErrchk(hipMalloc(&d_B, Ncols * Ncols * sizeof(double))); dim3 Grid(iDivUp(Ncols, BLOCK_SIZE), iDivUp(Ncols, BLOCK_SIZE)); dim3 Block(BLOCK_SIZE, BLOCK_SIZE); hipLaunchKernelGGL(( copy_kernel), dim3(Grid), dim3(Block), 0, 0, d_A, d_R, d_C, d_B, Nrows, Ncols); // --- Solving an upper triangular linear system const double alpha = 1.; cublasSafeCall(hipblasDtrsm(cublas_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, Ncols, Ncols, &alpha, d_R, Ncols, d_B, Ncols)); gpuErrchk(hipMemcpy(h_B, d_B, Ncols * Ncols * sizeof(double), hipMemcpyDeviceToHost)); printf("\n\n"); for (int i=0; i<Ncols; i++) printf("B[%i] = %f\n", i, h_B[i]); hipsolverDnDestroy(solver_handle); return 0; }
6788f0a8b20d4041e41b2c182cfd8b18d0f7ef84.cu
#include "cuda_runtime.h" #include "device_launch_paraMeters.h" #include<iostream> #include<iomanip> #include<stdlib.h> #include<stdio.h> #include<assert.h> #include <cusolverDn.h> #include <cublas_v2.h> #include <cuda_runtime_api.h> #include "Utilities.cuh" #define BLOCK_SIZE 32 /***************/ /* COPY KERNEL */ /***************/ __global__ void copy_kernel(const double * __restrict d_in1, double * __restrict d_out1, const double * __restrict d_in2, double * __restrict d_out2, const int M, const int N) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if ((i < N) && (j < N)) { d_out1[j * N + i] = d_in1[j * M + i]; d_out2[j * N + i] = d_in2[j * M + i]; } } /********/ /* MAIN */ /********/ int main(){ // --- ASSUMPTION Nrows >= Ncols const int Nrows = 7; const int Ncols = 5; // --- cuSOLVE input/output parameters/arrays int work_size = 0; int *devInfo; gpuErrchk(cudaMalloc(&devInfo, sizeof(int))); // --- CUDA solver initialization cusolverDnHandle_t solver_handle; cusolverDnCreate(&solver_handle); // --- CUBLAS initialization cublasHandle_t cublas_handle; cublasSafeCall(cublasCreate(&cublas_handle)); // --- Setting the host, Nrows x Ncols matrix double *h_A = (double *)malloc(Nrows * Ncols * sizeof(double)); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Ncols; i++) h_A[j + i*Nrows] = (i + j*j) * sqrt((double)(i + j)); // --- Setting the device matrix and moving the host matrix to the device double *d_A; gpuErrchk(cudaMalloc(&d_A, Nrows * Ncols * sizeof(double))); gpuErrchk(cudaMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), cudaMemcpyHostToDevice)); // --- CUDA QR initialization double *d_TAU; gpuErrchk(cudaMalloc((void**)&d_TAU, min(Nrows, Ncols) * sizeof(double))); cusolveSafeCall(cusolverDnDgeqrf_bufferSize(solver_handle, Nrows, Ncols, d_A, Nrows, &work_size)); double *work; gpuErrchk(cudaMalloc(&work, work_size * sizeof(double))); // --- CUDA GERF execution cusolveSafeCall(cusolverDnDgeqrf(solver_handle, Nrows, Ncols, d_A, Nrows, d_TAU, work, work_size, devInfo)); int devInfo_h = 0; gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost)); if (devInfo_h != 0) std::cout << "Unsuccessful gerf execution\n\n"; // --- At this point, the upper triangular part of A contains the elements of R. Showing this. gpuErrchk(cudaMemcpy(h_A, d_A, Nrows * Ncols * sizeof(double), cudaMemcpyDeviceToHost)); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Ncols; i++) if (i >= j) printf("R[%i, %i] = %f\n", j, i, h_A[j + i*Nrows]); // --- Initializing the output Q matrix (Of course, this step could be done by a kernel function directly on the device) double *h_Q = (double *)malloc(Nrows * Nrows * sizeof(double)); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Nrows; i++) if (j == i) h_Q[j + i*Nrows] = 1.; else h_Q[j + i*Nrows] = 0.; double *d_Q; gpuErrchk(cudaMalloc(&d_Q, Nrows * Nrows * sizeof(double))); gpuErrchk(cudaMemcpy(d_Q, h_Q, Nrows * Nrows * sizeof(double), cudaMemcpyHostToDevice)); // --- CUDA QR execution cusolveSafeCall(cusolverDnDormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_N, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_Q, Nrows, work, work_size, devInfo)); // --- At this point, d_Q contains the elements of Q. Showing this. 
gpuErrchk(cudaMemcpy(h_Q, d_Q, Nrows * Nrows * sizeof(double), cudaMemcpyDeviceToHost)); printf("\n\n"); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Nrows; i++) printf("Q[%i, %i] = %f\n", j, i, h_Q[j + i*Nrows]); // --- Initializing the data matrix C (Of course, this step could be done by a kernel function directly on the device). // --- Notice that, in this case, only the first column of C contains actual data, the others being empty (zeroed). However, cuBLAS trsm // has the capability of solving triangular linear systems with multiple right hand sides. double *h_C = (double *)calloc(Nrows * Nrows, sizeof(double)); for(int j = 0; j < Nrows; j++) h_C[j] = 1.; double *d_C; gpuErrchk(cudaMalloc(&d_C, Nrows * Nrows * sizeof(double))); gpuErrchk(cudaMemcpy(d_C, h_C, Nrows * Nrows * sizeof(double), cudaMemcpyHostToDevice)); // --- CUDA QR execution cusolveSafeCall(cusolverDnDormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, Nrows, Ncols, min(Nrows, Ncols), d_A, Nrows, d_TAU, d_C, Nrows, work, work_size, devInfo)); // --- At this point, d_C contains the elements of Q^T * C, where C is the data vector. Showing this. // --- According to the above, only the first column of d_C makes sense. gpuErrchk(cudaMemcpy(h_C, d_C, Nrows * Nrows * sizeof(double), cudaMemcpyDeviceToHost)); printf("\n\n"); for(int j = 0; j < Nrows; j++) for(int i = 0; i < Nrows; i++) printf("C[%i, %i] = %f\n", j, i, h_C[j + i*Nrows]); // --- Reducing the linear system size double *d_R; gpuErrchk(cudaMalloc(&d_R, Ncols * Ncols * sizeof(double))); double *h_B = (double *)malloc(Ncols * Ncols * sizeof(double)); double *d_B; gpuErrchk(cudaMalloc(&d_B, Ncols * Ncols * sizeof(double))); dim3 Grid(iDivUp(Ncols, BLOCK_SIZE), iDivUp(Ncols, BLOCK_SIZE)); dim3 Block(BLOCK_SIZE, BLOCK_SIZE); copy_kernel<<<Grid, Block>>>(d_A, d_R, d_C, d_B, Nrows, Ncols); // --- Solving an upper triangular linear system const double alpha = 1.; cublasSafeCall(cublasDtrsm(cublas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, Ncols, Ncols, &alpha, d_R, Ncols, d_B, Ncols)); gpuErrchk(cudaMemcpy(h_B, d_B, Ncols * Ncols * sizeof(double), cudaMemcpyDeviceToHost)); printf("\n\n"); for (int i=0; i<Ncols; i++) printf("B[%i] = %f\n", i, h_B[i]); cusolverDnDestroy(solver_handle); return 0; }
1cc745e08529ae7d6308b58c706e247804e43313.hip
// !!! This is a file automatically generated by hipify!!! /** * (C) Copyright 2020 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "pwu_kernel_parameter.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_linearstep_device.h" #include <memory> namespace RPU { template <typename T> struct UpdateFunctorLinearStepMult { __device__ __forceinline__ void operator()( T &w, uint32_t n, uint32_t negative, const float4 par_4, const float2 lin_slope, T &par_1, const T *global_par, T noise_std_dw, hiprandState_t &local_state) { T lin_dw = (negative > 0) ? (par_4.w) : (-par_4.y); //[3], [1] T lin_a = (negative > 0) ? (lin_slope.y) : (-lin_slope.x); // [1],[0] // n is larger 0 in any case if (n == 1) { if (noise_std_dw > 0) { T stoch_value = hiprand_normal(&local_state); stoch_value *= noise_std_dw; w += (lin_a * w + lin_dw) * (1.0 + stoch_value); } else { w += lin_a * w + lin_dw; } } else { if (noise_std_dw > 0) { for (int i_updates = 0; i_updates < n; i_updates++) { T stoch_value = hiprand_normal(&local_state); stoch_value *= noise_std_dw; w += (lin_a * w + lin_dw) * (1.0 + stoch_value); } } else { for (int i_updates = 0; i_updates < n; i_updates++) { w += lin_a * w + lin_dw; } } } // better always check both bounds T wmax = par_4.z; // [2] w = (w > wmax) ? wmax : w; T wmin = par_4.x; // [0] w = (w < wmin) ? wmin : w; } }; template <typename T> struct UpdateFunctorLinearStepAdd { __device__ __forceinline__ void operator()( T &w, uint32_t n, uint32_t negative, const float4 par_4, const float2 lin_slope, T &par_1, const T *global_par, T noise_std_dw, hiprandState_t &local_state) { T lin_dw = (negative > 0) ? (par_4.w) : (-par_4.y); // [3] [1] T lin_a = (negative > 0) ? (lin_slope.y) : (-lin_slope.x); //[1],[0] // n is larger 0 in any case if (n == 1) { if (noise_std_dw > 0) { T stoch_value = hiprand_normal(&local_state); stoch_value *= noise_std_dw; w += lin_a * w + lin_dw * (1.0 + stoch_value); } else { w += lin_a * w + lin_dw; } } else { if (noise_std_dw > 0) { for (int i_updates = 0; i_updates < n; i_updates++) { T stoch_value = hiprand_normal(&local_state); stoch_value *= noise_std_dw; w += lin_a * w + lin_dw * (1.0 + stoch_value); } } else { for (int i_updates = 0; i_updates < n; i_updates++) { w += lin_a * w + lin_dw; } } } T wmax = par_4.z; // [2] w = (w > wmax) ? wmax : w; T wmin = par_4.x; // [0] w = (w < wmin) ? 
wmin : w; } }; #define ARGS \ (this->context_, this->x_size_, this->d_size_, m_batch, nK32, use_bo64, out_trans, up, \ par.getName()) template <typename T> pwukpvec_t<T> LinearStepRPUDeviceCuda<T>::getUpdateKernels( int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; const auto &par = getPar(); if (par.ls_mult_noise) { v.push_back( RPU::make_unique<PWUKernelParameterSingleFunctor<T, UpdateFunctorLinearStepMult<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchFunctor<T, UpdateFunctorLinearStepMult<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchSharedFunctor<T, UpdateFunctorLinearStepMult<T>, 1>> ARGS); } else { v.push_back( RPU::make_unique<PWUKernelParameterSingleFunctor<T, UpdateFunctorLinearStepAdd<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchFunctor<T, UpdateFunctorLinearStepAdd<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchSharedFunctor<T, UpdateFunctorLinearStepAdd<T>, 1>> ARGS); } return v; } #undef ARGS template class LinearStepRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class LinearStepRPUDeviceCuda<double>; #endif } // namespace RPU
1cc745e08529ae7d6308b58c706e247804e43313.cu
/** * (C) Copyright 2020 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "pwu_kernel_parameter.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_linearstep_device.h" #include <memory> namespace RPU { template <typename T> struct UpdateFunctorLinearStepMult { __device__ __forceinline__ void operator()( T &w, uint32_t n, uint32_t negative, const float4 par_4, const float2 lin_slope, T &par_1, const T *global_par, T noise_std_dw, curandState &local_state) { T lin_dw = (negative > 0) ? (par_4.w) : (-par_4.y); //[3], [1] T lin_a = (negative > 0) ? (lin_slope.y) : (-lin_slope.x); // [1],[0] // n is larger 0 in any case if (n == 1) { if (noise_std_dw > 0) { T stoch_value = curand_normal(&local_state); stoch_value *= noise_std_dw; w += (lin_a * w + lin_dw) * (1.0 + stoch_value); } else { w += lin_a * w + lin_dw; } } else { if (noise_std_dw > 0) { for (int i_updates = 0; i_updates < n; i_updates++) { T stoch_value = curand_normal(&local_state); stoch_value *= noise_std_dw; w += (lin_a * w + lin_dw) * (1.0 + stoch_value); } } else { for (int i_updates = 0; i_updates < n; i_updates++) { w += lin_a * w + lin_dw; } } } // better always check both bounds T wmax = par_4.z; // [2] w = (w > wmax) ? wmax : w; T wmin = par_4.x; // [0] w = (w < wmin) ? wmin : w; } }; template <typename T> struct UpdateFunctorLinearStepAdd { __device__ __forceinline__ void operator()( T &w, uint32_t n, uint32_t negative, const float4 par_4, const float2 lin_slope, T &par_1, const T *global_par, T noise_std_dw, curandState &local_state) { T lin_dw = (negative > 0) ? (par_4.w) : (-par_4.y); // [3] [1] T lin_a = (negative > 0) ? (lin_slope.y) : (-lin_slope.x); //[1],[0] // n is larger 0 in any case if (n == 1) { if (noise_std_dw > 0) { T stoch_value = curand_normal(&local_state); stoch_value *= noise_std_dw; w += lin_a * w + lin_dw * (1.0 + stoch_value); } else { w += lin_a * w + lin_dw; } } else { if (noise_std_dw > 0) { for (int i_updates = 0; i_updates < n; i_updates++) { T stoch_value = curand_normal(&local_state); stoch_value *= noise_std_dw; w += lin_a * w + lin_dw * (1.0 + stoch_value); } } else { for (int i_updates = 0; i_updates < n; i_updates++) { w += lin_a * w + lin_dw; } } } T wmax = par_4.z; // [2] w = (w > wmax) ? wmax : w; T wmin = par_4.x; // [0] w = (w < wmin) ? 
wmin : w; } }; #define ARGS \ (this->context_, this->x_size_, this->d_size_, m_batch, nK32, use_bo64, out_trans, up, \ par.getName()) template <typename T> pwukpvec_t<T> LinearStepRPUDeviceCuda<T>::getUpdateKernels( int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; const auto &par = getPar(); if (par.ls_mult_noise) { v.push_back( RPU::make_unique<PWUKernelParameterSingleFunctor<T, UpdateFunctorLinearStepMult<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchFunctor<T, UpdateFunctorLinearStepMult<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchSharedFunctor<T, UpdateFunctorLinearStepMult<T>, 1>> ARGS); } else { v.push_back( RPU::make_unique<PWUKernelParameterSingleFunctor<T, UpdateFunctorLinearStepAdd<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchFunctor<T, UpdateFunctorLinearStepAdd<T>, 1>> ARGS); v.push_back( RPU::make_unique<PWUKernelParameterBatchSharedFunctor<T, UpdateFunctorLinearStepAdd<T>, 1>> ARGS); } return v; } #undef ARGS template class LinearStepRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class LinearStepRPUDeviceCuda<double>; #endif } // namespace RPU
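Apart from the generated-by-hipify banner, the only difference between the .hip and .cu versions of this pair is the device RNG API: curandState becomes hiprandState_t and curand_normal becomes hiprand_normal, while the update logic is untouched. A minimal sketch of that mapping follows; noisy_step is a hypothetical helper (not part of the file), the two variants belong in the .cu and .hip translation units respectively, and the usual curand_kernel.h / hiprand/hiprand_kernel.h headers are assumed.

// CUDA variant: #include <curand_kernel.h>
__device__ float noisy_step(float w, float dw, float noise_std, curandState &state) {
  // draw one standard-normal sample from the per-thread RNG state,
  // as UpdateFunctorLinearStepAdd does above
  float z = curand_normal(&state);
  return w + dw * (1.0f + noise_std * z);
}

// HIP variant as emitted by hipify: #include <hiprand/hiprand_kernel.h>
__device__ float noisy_step(float w, float dw, float noise_std, hiprandState_t &state) {
  float z = hiprand_normal(&state);
  return w + dw * (1.0f + noise_std * z);
}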
ad2c2c9fbf15be8cbe369f0f6dc85ffc1c1fab41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void square(float* d_out, float* d_in) { int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f; } int main(int argc, char ** argv) { const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; ++i) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float* d_in; float* d_out; // allocate GPU memory hipMalloc((void**) &d_in, ARRAY_BYTES); hipMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // launch the kernel hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in); // copy back the result array to the CPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); // print out the resulting array for (int i = 0; i < ARRAY_SIZE; ++i) { printf("%f", h_out[i]); printf(i % 4 != 3 ? "\t" : "\n"); } // free GPU memory allocation hipFree(d_in); hipFree(d_out); return 0; }
ad2c2c9fbf15be8cbe369f0f6dc85ffc1c1fab41.cu
#include <stdio.h> __global__ void square(float* d_out, float* d_in) { int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f; } int main(int argc, char ** argv) { const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; ++i) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float* d_in; float* d_out; // allocate GPU memory cudaMalloc((void**) &d_in, ARRAY_BYTES); cudaMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // launch the kernel square<<<1, ARRAY_SIZE>>>(d_out, d_in); // copy back the result array to the CPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); // print out the resulting array for (int i = 0; i < ARRAY_SIZE; ++i) { printf("%f", h_out[i]); printf(i % 4 != 3 ? "\t" : "\n"); } // free GPU memory allocation cudaFree(d_in); cudaFree(d_out); return 0; }
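This pair shows the rest of hipify's mechanical rewrites: the runtime API renames 1:1 (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy with matching enum values, cudaFree -> hipFree), and the triple-chevron launch becomes an explicit hipLaunchKernelGGL call whose two extra arguments (0, 0) are the dynamic shared-memory size and stream that <<<...>>> left implicit. Below is a condensed sketch of the same host pattern in HIP spelling, with the CUDA call each line replaces noted in a comment; scale2 and run_scale2 are hypothetical names used only for illustration.

#include <hip/hip_runtime.h>

// stand-in for the `square` kernel above
__global__ void scale2(float* out, const float* in) {
  int i = threadIdx.x;
  out[i] = 2.0f * in[i];
}

void run_scale2(const float* h_in, float* h_out, int n) {
  const size_t bytes = n * sizeof(float);
  float *d_in, *d_out;
  hipMalloc((void**) &d_in, bytes);                        // cudaMalloc
  hipMalloc((void**) &d_out, bytes);                       // cudaMalloc
  hipMemcpy(d_in, h_in, bytes, hipMemcpyHostToDevice);     // cudaMemcpy, cudaMemcpyHostToDevice
  // square<<<1, n>>>(d_out, d_in) becomes an explicit launch:
  hipLaunchKernelGGL(scale2, dim3(1), dim3(n), 0, 0, d_out, d_in);
  hipMemcpy(h_out, d_out, bytes, hipMemcpyDeviceToHost);   // cudaMemcpy, cudaMemcpyDeviceToHost
  hipFree(d_in);                                           // cudaFree
  hipFree(d_out);                                          // cudaFree
}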
7046aaa909443d241e800fbfc1496ee0241614ea.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include <hip/hip_runtime.h> #include <cusp/array1d.h> #include <cusp/blas/blas.h> #include <cusp/dia_matrix.h> #include <cusp/monitor.h> #include <cusp/precond/diagonal.h> #include <cusp/krylov/bicgstab.h> #include <cusp/krylov/cg.h> #include <cusp/print.h> #include <thrust/device_ptr.h> #include "cuda_bicgstab.h" #include "cuda_bluebottle.h" //#include "entrySearch.h" #include "cuda_particle.h" #ifdef TEST #include "cuda_testing.h" #endif extern "C" void cuda_ustar_helmholtz(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); hipSetDevice(dev + dev_start); // write right-hand side cuda_ustar_rhs(dev); cuda_dom_BC_star(); if(nparts > 0) { cuda_part_BC_star(); } // create temporary U* without ghost cells for bicgstab result cusp::array1d<real, cusp::device_memory> ustar_tmp(dom[dev].Gfx.s3, 0.); // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_ustar; _A_ustar = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gfx._s3, dom[dev].Gfx._s3, 0, 13); // set up the coefficient matrix _A_ustar->diagonal_offsets[0] = -dom[dev].Gfx._s3 + dom[dev].Gfx._s2; _A_ustar->diagonal_offsets[1] = -dom[dev].Gfx._s2; _A_ustar->diagonal_offsets[2] = -dom[dev].Gfx._s2 + dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[3] = -dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[4] = -dom[dev].Gfx._s1 + 2; _A_ustar->diagonal_offsets[5] = -1; _A_ustar->diagonal_offsets[6] = 0; _A_ustar->diagonal_offsets[7] = 1; _A_ustar->diagonal_offsets[8] = dom[dev].Gfx._s1 - 2; _A_ustar->diagonal_offsets[9] = dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[10] = dom[dev].Gfx._s2 - dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[11] = dom[dev].Gfx._s2; _A_ustar->diagonal_offsets[12] = dom[dev].Gfx._s3 - dom[dev].Gfx._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfx._in < MAX_THREADS_DIM) threads_x = dom[dev].Gfx._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfx._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gfx._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfx._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gfx._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfx._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gfx._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gfx._kn / 
(real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // create temporary ustar without ghost cells real *_ustar_noghost; (hipMalloc((void**) &_ustar_noghost, sizeof(real) * dom[dev].Gfx.s3)); // copy u_star into noghost structure for Helmholtz right-hand side hipLaunchKernelGGL(( copy_u_noghost), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _ustar_noghost, _u_star[dev], _dom[dev]); // build pressure-Poisson coefficient matrix hipLaunchKernelGGL(( ustar_coeffs_init), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); hipLaunchKernelGGL(( ustar_coeffs), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0]), _flag_u[dev], _flag_v[dev], _flag_w[dev]); /* cusp::dia_matrix<int, real, cusp::host_memory> AA = *_A_ustar; printf("\n"); for(int i = 0; i < dom[dev].Gfx.s3; i++) { for(int j = 0; j < dom[dev].Gfx.s3; j++) { if(j == AA.diagonal_offsets[0] + i) if(AA.values(i, 0) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 0)); else if(j == AA.diagonal_offsets[1] + i) if(AA.values(i, 1) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 1)); else if(j == AA.diagonal_offsets[2] + i) if(AA.values(i, 2) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 2)); else if(j == AA.diagonal_offsets[3] + i) if(AA.values(i, 3) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 3)); else if(j == AA.diagonal_offsets[4] + i) if(AA.values(i, 4) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 4)); else if(j == AA.diagonal_offsets[5] + i) if(AA.values(i, 5) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 5)); else if(j == AA.diagonal_offsets[6] + i) if(AA.values(i, 6) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 6)); else if(j == AA.diagonal_offsets[7] + i) if(AA.values(i, 7) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 7)); else if(j == AA.diagonal_offsets[8] + i) if(AA.values(i, 8) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 8)); else if(j == AA.diagonal_offsets[9] + i) if(AA.values(i, 9) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 9)); else if(j == AA.diagonal_offsets[10] + i) if(AA.values(i, 10) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 10)); else if(j == AA.diagonal_offsets[11] + i) if(AA.values(i, 11) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 11)); else if(j == AA.diagonal_offsets[12] + i) if(AA.values(i, 12) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 12)); else printf(" "); } printf("\n"); } */ // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_ustar(_ustar_noghost); cusp::array1d<real, cusp::device_memory> *_ustar_rhs; _ustar_rhs = new cusp::array1d<real, cusp::device_memory>(_ptr_ustar, _ptr_ustar + dom[dev].Gfx._s3); // account for boundary conditions if(bc.uW == PERIODIC) hipLaunchKernelGGL(( ustar_coeffs_periodic_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uW == DIRICHLET) hipLaunchKernelGGL(( ustar_coeffs_dirichlet_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, bc.uWD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uE == PERIODIC) 
hipLaunchKernelGGL(( ustar_coeffs_periodic_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uE == DIRICHLET) hipLaunchKernelGGL(( ustar_coeffs_dirichlet_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, bc.uED, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uS == PERIODIC) hipLaunchKernelGGL(( ustar_coeffs_periodic_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uS == DIRICHLET) hipLaunchKernelGGL(( ustar_coeffs_dirichlet_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, bc.uSD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uN == PERIODIC) hipLaunchKernelGGL(( ustar_coeffs_periodic_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uN == DIRICHLET) hipLaunchKernelGGL(( ustar_coeffs_dirichlet_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, bc.uND, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uB == PERIODIC) hipLaunchKernelGGL(( ustar_coeffs_periodic_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uB == DIRICHLET) hipLaunchKernelGGL(( ustar_coeffs_dirichlet_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, bc.uBD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uT == PERIODIC) hipLaunchKernelGGL(( ustar_coeffs_periodic_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uT == DIRICHLET) hipLaunchKernelGGL(( ustar_coeffs_dirichlet_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, bc.uTD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); hipLaunchKernelGGL(( ustar_coeffs_particles), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0]), _flag_u[dev]); // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_ustar_rhs); if(norm == 0) norm = 1.; cusp::blas::scal(*_ustar_rhs, 1. / norm); // call BiCGSTAB to solve for ustar_tmp cusp::monitor<real> monitor(*_ustar_rhs, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_ustar); //cusp::krylov::bicgstab(*_A_ustar, ustar_tmp, *_ustar_rhs, monitor, M); cusp::krylov::cg(*_A_ustar, ustar_tmp, *_ustar_rhs, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_helmholtz_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_helmholtz_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The u_star Helmholtz equation did not converge. 
\n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(ustar_tmp, norm); // copy solution back to _u_star hipLaunchKernelGGL(( copy_u_ghost), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _u_star[dev], thrust::raw_pointer_cast(ustar_tmp.data()), _dom[dev]); // clean up delete(_ustar_rhs); delete(_A_ustar); (hipFree(_ustar_noghost)); #ifdef TEST // copy _u_star to _u hipMemcpy(_u[dev], _u_star[dev], dom[dev].Gfx.s3b * sizeof(real), hipMemcpyDeviceToDevice); #endif } } extern "C" void cuda_vstar_helmholtz(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); hipSetDevice(dev + dev_start); // write right-hand side cuda_vstar_rhs(dev); cuda_dom_BC_star(); if(nparts > 0) { cuda_part_BC_star(); } // create temporary U* without ghost cells for bicgstab result cusp::array1d<real, cusp::device_memory> vstar_tmp(dom[dev].Gfy.s3, 0.); // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_vstar; _A_vstar = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gfy._s3, dom[dev].Gfy._s3, 0, 13); // set up the coefficient matrix _A_vstar->diagonal_offsets[0] = -dom[dev].Gfy._s3 + dom[dev].Gfy._s2; _A_vstar->diagonal_offsets[1] = -dom[dev].Gfy._s2; _A_vstar->diagonal_offsets[2] = -dom[dev].Gfy._s2 + 2*dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[3] = -dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[4] = -dom[dev].Gfy._s1 + 1; _A_vstar->diagonal_offsets[5] = -1; _A_vstar->diagonal_offsets[6] = 0; _A_vstar->diagonal_offsets[7] = 1; _A_vstar->diagonal_offsets[8] = dom[dev].Gfy._s1 - 1; _A_vstar->diagonal_offsets[9] = dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[10] = dom[dev].Gfy._s2 - 2*dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[11] = dom[dev].Gfy._s2; _A_vstar->diagonal_offsets[12] = dom[dev].Gfy._s3 - dom[dev].Gfy._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfy._in < MAX_THREADS_DIM) threads_x = dom[dev].Gfy._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfy._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gfy._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfy._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gfy._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfy._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gfy._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gfy._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // create temporary ustar without ghost cells real *_vstar_noghost; (hipMalloc((void**) &_vstar_noghost, sizeof(real) * dom[dev].Gfy.s3)); // copy v_star into noghost structure for Helmholtz right-hand side hipLaunchKernelGGL(( copy_v_noghost), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _vstar_noghost, _v_star[dev], _dom[dev]); // build pressure-Poisson coefficient matrix hipLaunchKernelGGL(( vstar_coeffs_init), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); hipLaunchKernelGGL(( vstar_coeffs), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0]), _flag_u[dev], _flag_v[dev], _flag_w[dev]); // account for boundary conditions 
if(bc.vW == PERIODIC) hipLaunchKernelGGL(( vstar_coeffs_periodic_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vW == DIRICHLET) hipLaunchKernelGGL(( vstar_coeffs_dirichlet_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, bc.vWD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vE == PERIODIC) hipLaunchKernelGGL(( vstar_coeffs_periodic_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vE == DIRICHLET) hipLaunchKernelGGL(( vstar_coeffs_dirichlet_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, bc.vED, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vS == PERIODIC) hipLaunchKernelGGL(( vstar_coeffs_periodic_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vS == DIRICHLET) hipLaunchKernelGGL(( vstar_coeffs_dirichlet_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, bc.vSD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vN == PERIODIC) hipLaunchKernelGGL(( vstar_coeffs_periodic_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vN == DIRICHLET) hipLaunchKernelGGL(( vstar_coeffs_dirichlet_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, bc.vND, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vB == PERIODIC) hipLaunchKernelGGL(( vstar_coeffs_periodic_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vB == DIRICHLET) hipLaunchKernelGGL(( vstar_coeffs_dirichlet_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, bc.vBD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vT == PERIODIC) hipLaunchKernelGGL(( vstar_coeffs_periodic_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vT == DIRICHLET) hipLaunchKernelGGL(( vstar_coeffs_dirichlet_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, bc.vTD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); hipLaunchKernelGGL(( vstar_coeffs_particles), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0]), _flag_v[dev]); // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_vstar(_vstar_noghost); cusp::array1d<real, cusp::device_memory> *_vstar_rhs; _vstar_rhs = new cusp::array1d<real, cusp::device_memory>(_ptr_vstar, _ptr_vstar + dom[dev].Gfy._s3); // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_vstar_rhs); if(norm == 0) norm = 1.; cusp::blas::scal(*_vstar_rhs, 1. 
/ norm); // call BiCGSTAB to solve for ustar_tmp cusp::monitor<real> monitor(*_vstar_rhs, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_vstar); //cusp::krylov::bicgstab(*_A_vstar, vstar_tmp, *_vstar_rhs, monitor, M); cusp::krylov::cg(*_A_vstar, vstar_tmp, *_vstar_rhs, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_helmholtz_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_helmholtz_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The v_star Helmholtz equation did not converge. \n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(vstar_tmp, norm); // copy solution back to _v_star field hipLaunchKernelGGL(( copy_v_ghost), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _v_star[dev], thrust::raw_pointer_cast(vstar_tmp.data()), _dom[dev]); // clean up delete(_vstar_rhs); delete(_A_vstar); (hipFree(_vstar_noghost)); #ifdef TEST // copy _v_star to _v hipMemcpy(_v[dev], _v_star[dev], dom[dev].Gfy.s3b * sizeof(real), hipMemcpyDeviceToDevice); #endif } } extern "C" void cuda_wstar_helmholtz(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); hipSetDevice(dev + dev_start); // write right-hand side cuda_wstar_rhs(dev); cuda_dom_BC_star(); if(nparts > 0) { cuda_part_BC_star(); } // create temporary U* without ghost cells for bicgstab result cusp::array1d<real, cusp::device_memory> wstar_tmp(dom[dev].Gfz.s3, 0.); // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_wstar; _A_wstar = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gfz._s3, dom[dev].Gfz._s3, 0, 13); // set up the coefficient matrix _A_wstar->diagonal_offsets[0] = -dom[dev].Gfz._s3 + 2*dom[dev].Gfz._s2; _A_wstar->diagonal_offsets[1] = -dom[dev].Gfz._s2; _A_wstar->diagonal_offsets[2] = -dom[dev].Gfz._s2 + dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[3] = -dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[4] = -dom[dev].Gfz._s1 + 1; _A_wstar->diagonal_offsets[5] = -1; _A_wstar->diagonal_offsets[6] = 0; _A_wstar->diagonal_offsets[7] = 1; _A_wstar->diagonal_offsets[8] = dom[dev].Gfz._s1 - 1; _A_wstar->diagonal_offsets[9] = dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[10] = dom[dev].Gfz._s2 - dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[11] = dom[dev].Gfz._s2; _A_wstar->diagonal_offsets[12] = dom[dev].Gfz._s3 - 2*dom[dev].Gfz._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfz._in < MAX_THREADS_DIM) threads_x = dom[dev].Gfz._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfz._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gfz._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfz._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gfz._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfz._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gfz._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gfz._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // create temporary ustar without ghost cells real *_wstar_noghost; 
(hipMalloc((void**) &_wstar_noghost, sizeof(real) * dom[dev].Gfz.s3)); // copy w_star into noghost structure for Helmholtz right-hand side hipLaunchKernelGGL(( copy_w_noghost), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _wstar_noghost, _w_star[dev], _dom[dev]); // build pressure-Poisson coefficient matrix hipLaunchKernelGGL(( wstar_coeffs_init), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); hipLaunchKernelGGL(( wstar_coeffs), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0]), _flag_u[dev], _flag_v[dev], _flag_w[dev]); // account for boundary conditions if(bc.wW == PERIODIC) hipLaunchKernelGGL(( wstar_coeffs_periodic_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wW == DIRICHLET) hipLaunchKernelGGL(( wstar_coeffs_dirichlet_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, bc.wWD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wE == PERIODIC) hipLaunchKernelGGL(( wstar_coeffs_periodic_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wE == DIRICHLET) hipLaunchKernelGGL(( wstar_coeffs_dirichlet_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, bc.wED, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wS == PERIODIC) hipLaunchKernelGGL(( wstar_coeffs_periodic_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wS == DIRICHLET) hipLaunchKernelGGL(( wstar_coeffs_dirichlet_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, bc.wSD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wN == PERIODIC) hipLaunchKernelGGL(( wstar_coeffs_periodic_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wN == DIRICHLET) hipLaunchKernelGGL(( wstar_coeffs_dirichlet_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, bc.wND, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wB == PERIODIC) hipLaunchKernelGGL(( wstar_coeffs_periodic_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wB == DIRICHLET) hipLaunchKernelGGL(( wstar_coeffs_dirichlet_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, bc.wBD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wT == PERIODIC) hipLaunchKernelGGL(( wstar_coeffs_periodic_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wT == DIRICHLET) hipLaunchKernelGGL(( wstar_coeffs_dirichlet_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, bc.wTD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); hipLaunchKernelGGL(( wstar_coeffs_particles), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _dom[dev], _A_wstar->values.pitch, 
thrust::raw_pointer_cast(&_A_wstar->values.values[0]), _flag_w[dev]); // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_wstar(_wstar_noghost); cusp::array1d<real, cusp::device_memory> *_wstar_rhs; _wstar_rhs = new cusp::array1d<real, cusp::device_memory>(_ptr_wstar, _ptr_wstar + dom[dev].Gfz.s3); // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_wstar_rhs); if(norm == 0) norm = 1.; cusp::blas::scal(*_wstar_rhs, 1. / norm); // call BiCGSTAB to solve for ustar_tmp cusp::monitor<real> monitor(*_wstar_rhs, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_wstar); //cusp::krylov::bicgstab(*_A_wstar, wstar_tmp, *_wstar_rhs, monitor, M); cusp::krylov::cg(*_A_wstar, wstar_tmp, *_wstar_rhs, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_helmholtz_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_helmholtz_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The w_star Helmholtz equation did not converge. \n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(wstar_tmp, norm); // copy solution back to _v_star field hipLaunchKernelGGL(( copy_w_ghost), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _w_star[dev], thrust::raw_pointer_cast(wstar_tmp.data()), _dom[dev]); // clean up delete(_wstar_rhs); delete(_A_wstar); (hipFree(_wstar_noghost)); #ifdef TEST // copy _w_star to _w hipMemcpy(_w[dev], _w_star[dev], dom[dev].Gfz.s3b * sizeof(real), hipMemcpyDeviceToDevice); #endif } } extern "C" void cuda_PP_bicgstab(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); hipSetDevice(dev + dev_start); // write right-hand side cuda_PP_rhs(dev); if(nparts > 0) { cuda_part_BC_p(dev); } // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_p; _A_p = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gcc._s3, dom[dev].Gcc._s3, 0, 13); // set up the coefficient matrix _A_p->diagonal_offsets[0] = -dom[dev].Gcc._s3 + dom[dev].Gcc._s2; _A_p->diagonal_offsets[1] = -dom[dev].Gcc._s2; _A_p->diagonal_offsets[2] = -dom[dev].Gcc._s2 + dom[dev].Gcc._s1; _A_p->diagonal_offsets[3] = -dom[dev].Gcc._s1; _A_p->diagonal_offsets[4] = -dom[dev].Gcc._s1 + 1; _A_p->diagonal_offsets[5] = -1; _A_p->diagonal_offsets[6] = 0; _A_p->diagonal_offsets[7] = 1; _A_p->diagonal_offsets[8] = dom[dev].Gcc._s1 - 1; _A_p->diagonal_offsets[9] = dom[dev].Gcc._s1; _A_p->diagonal_offsets[10] = dom[dev].Gcc._s2 - dom[dev].Gcc._s1; _A_p->diagonal_offsets[11] = dom[dev].Gcc._s2; _A_p->diagonal_offsets[12] = dom[dev].Gcc._s3 - dom[dev].Gcc._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gcc._in < MAX_THREADS_DIM) threads_x = dom[dev].Gcc._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gcc._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gcc._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gcc._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gcc._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gcc._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gcc._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gcc._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 
numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // build pressure-Poisson coefficient matrix hipLaunchKernelGGL(( coeffs_init), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0])); hipLaunchKernelGGL(( coeffs), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _flag_u[dev], _flag_v[dev], _flag_w[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0])); if(bc.pW == PERIODIC) hipLaunchKernelGGL(( coeffs_periodic_W), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_u[dev]); if(bc.pE == PERIODIC) hipLaunchKernelGGL(( coeffs_periodic_E), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_u[dev]); if(bc.pS == PERIODIC) hipLaunchKernelGGL(( coeffs_periodic_S), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_v[dev]); if(bc.pN == PERIODIC) hipLaunchKernelGGL(( coeffs_periodic_N), dim3(numBlocks_y), dim3(dimBlocks_y), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_v[dev]); if(bc.pB == PERIODIC) hipLaunchKernelGGL(( coeffs_periodic_B), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_w[dev]); if(bc.pT == PERIODIC) hipLaunchKernelGGL(( coeffs_periodic_T), dim3(numBlocks_z), dim3(dimBlocks_z), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_w[dev]); hipLaunchKernelGGL(( coeffs_particle), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _phase[dev]); /* cusp::dia_matrix<int, real, cusp::host_memory> AA = *_A_p; printf("\n"); for(int i = 0; i < dom[dev].Gcc.s3; i++) { for(int j = 0; j < dom[dev].Gcc.s3; j++) { if(j == AA.diagonal_offsets[0] + i) if(AA.values(i, 0) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 0)); else if(j == AA.diagonal_offsets[1] + i) if(AA.values(i, 1) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 1)); else if(j == AA.diagonal_offsets[2] + i) if(AA.values(i, 2) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 2)); else if(j == AA.diagonal_offsets[3] + i) if(AA.values(i, 3) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 3)); else if(j == AA.diagonal_offsets[4] + i) if(AA.values(i, 4) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 4)); else if(j == AA.diagonal_offsets[5] + i) if(AA.values(i, 5) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 5)); else if(j == AA.diagonal_offsets[6] + i) if(AA.values(i, 6) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 6)); else if(j == AA.diagonal_offsets[7] + i) if(AA.values(i, 7) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 7)); else if(j == AA.diagonal_offsets[8] + i) if(AA.values(i, 8) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 8)); else if(j == AA.diagonal_offsets[9] + i) if(AA.values(i, 9) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 9)); else if(j == AA.diagonal_offsets[10] + i) if(AA.values(i, 10) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 10)); else if(j == AA.diagonal_offsets[11] + i) if(AA.values(i, 11) == 0) printf(" "); else 
printf("%1.0f ", AA.values(i, 11)); else if(j == AA.diagonal_offsets[12] + i) if(AA.values(i, 12) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 12)); else printf(" "); } printf("\n"); } */ /* cusp::dia_matrix<int, real, cusp::host_memory> AA = *_A_p; FILE *mat = fopen("mat.txt", "w"); for(int i = 0; i < dom[dev].Gcc.s3; i++) { for(int j = 0; j < dom[dev].Gcc.s3; j++) { if(j == AA.diagonal_offsets[0] + i) if(AA.values(i, 0) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 0)); else if(j == AA.diagonal_offsets[1] + i) if(AA.values(i, 1) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 1)); else if(j == AA.diagonal_offsets[2] + i) if(AA.values(i, 2) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 2)); else if(j == AA.diagonal_offsets[3] + i) if(AA.values(i, 3) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 3)); else if(j == AA.diagonal_offsets[4] + i) if(AA.values(i, 4) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 4)); else if(j == AA.diagonal_offsets[5] + i) if(AA.values(i, 5) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 5)); else if(j == AA.diagonal_offsets[6] + i) if(AA.values(i, 6) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 6)); else if(j == AA.diagonal_offsets[7] + i) if(AA.values(i, 7) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 7)); else if(j == AA.diagonal_offsets[8] + i) if(AA.values(i, 8) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 8)); else if(j == AA.diagonal_offsets[9] + i) if(AA.values(i, 9) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 9)); else if(j == AA.diagonal_offsets[10] + i) if(AA.values(i, 10) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 10)); else if(j == AA.diagonal_offsets[11] + i) if(AA.values(i, 11) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 11)); else if(j == AA.diagonal_offsets[12] + i) if(AA.values(i, 12) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 12)); else fprintf(mat,"%f ", 0.); } } fclose(mat); */ // copy p0 to array without ghost cells and use it as an initial guess and solution space real *_phinoghost; (hipMalloc((void**) &_phinoghost, sizeof(real)*dom[dev].Gcc.s3)); hipLaunchKernelGGL(( copy_p_noghost), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _phinoghost, _phi[dev], _dom[dev]); thrust::device_ptr<real> _ptr_p_sol(_phinoghost); cusp::array1d<real, cusp::device_memory> *_p_sol; _p_sol = new cusp::array1d<real, cusp::device_memory>(_ptr_p_sol, _ptr_p_sol + dom[dev].Gcc._s3); // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_p(_rhs_p[dev]); cusp::array1d<real, cusp::device_memory> *_pp; _pp = new cusp::array1d<real, cusp::device_memory>(_ptr_p, _ptr_p + dom[dev].Gcc._s3); /* printf("%e\n", dt); printf("_p_sol_in\n"); cusp::print(*_p_sol); printf("_pp\n"); cusp::print(*_pp); */ // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_pp); //printf("norm = %e\n", norm/dom[dev].Gcc.s3); //if(norm > 100.*pp_residual) {//== 0) if(norm == 0) norm = 1.; cusp::blas::scal(*_pp, 1. / norm); cusp::blas::scal(*_p_sol, 1. 
/ norm); /*cusp::array1d<real, cusp::host_memory> PP = *_pp; cusp::blas::scal(PP, norm); //cusp::print(PP); real ppsum = 0.; for(int s = 0; s < dom[dev].Gcc.s3; s++) { ppsum += PP[s]*dom[dev].dx*dom[dev].dy*dom[dev].dz; } printf("PPSUM_1 = %e\n", ppsum*norm*dt/rho_f); */ // call BiCGSTAB to solve for p_sol cusp::monitor<real> monitor(*_pp, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_p); cusp::krylov::bicgstab(*_A_p, *_p_sol, *_pp, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The pressure-Poisson equation did not converge. \n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(*_p_sol, norm); // calculate average pressure //real p_avg = avg_entries(dom[dev].Gcc.s3, // thrust::raw_pointer_cast(_p_sol->data())); real p_avg = thrust::reduce(_p_sol->begin(), _p_sol->end(), (real) 0., thrust::plus<real>()) / dom[dev].Gcc.s3; // subtract average value from pressure cusp::array1d<real, cusp::device_memory> ones(dom[dev].Gcc.s3, 1.); cusp::blas::axpy(ones, *_p_sol, -p_avg); /* printf("_p_sol_out\n"); cusp::print(*_p_sol); */ // copy solution back to pressure field hipLaunchKernelGGL(( copy_p_ghost), dim3(numBlocks_x), dim3(dimBlocks_x), 0, 0, _phi[dev], thrust::raw_pointer_cast(_p_sol->data()), _dom[dev]); // } else { // // write convergence data to file (solver did not run) // if(rank == 0) { // char nam[FILE_NAME_SIZE] = "solver_expd.rec"; // recorder_bicgstab(nam, 0.,0.); // } else { // char nam[FILE_NAME_SIZE] = "solver_prec.rec"; // recorder_bicgstab(nam, 0.,0.); // } // } // clean up hipFree(_phinoghost); delete(_p_sol); delete(_pp); delete(_A_p); } } /*extern "C" void cuda_div_U(void) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); hipSetDevice(dev + dev_start); // write right-hand side cuda_div_U_launch(dev, _u[dev], _v[dev], _w[dev], _divU[dev]); } } */ extern "C" void cuda_ustar_rhs(int dev) { int threads_y = 0; int threads_z = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfx.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfx.jnb + 2; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfx.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfx.knb + 2; else threads_z = MAX_THREADS_DIM; blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) (threads_y-2)); blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) (threads_z-2)); dim3 dimBlocks(threads_y, threads_z); dim3 numBlocks(blocks_y, blocks_z); hipLaunchKernelGGL(( ustar_rhs), dim3(numBlocks), dim3(dimBlocks), 0, 0, rho_f, nu, _u0[dev], _v0[dev], _w0[dev], _p0[dev], _f_x[dev], _conv0_u[dev], _conv_u[dev], _u_star[dev], _dom[dev], dt, dt0); } extern "C" void cuda_vstar_rhs(int dev) { int threads_z = 0; int threads_x = 0; int blocks_z = 0; int blocks_x = 0; if(dom[dev].Gfy.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfx.knb + 2; else threads_z = MAX_THREADS_DIM; if(dom[dev].Gfy.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfx.inb + 2; else threads_x = MAX_THREADS_DIM; blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) (threads_z-2)); blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) (threads_x-2)); dim3 dimBlocks(threads_z, threads_x); dim3 numBlocks(blocks_z, blocks_x); hipLaunchKernelGGL(( vstar_rhs), dim3(numBlocks), 
dim3(dimBlocks), 0, 0, rho_f, nu, _u0[dev], _v0[dev], _w0[dev], _p0[dev], _f_y[dev], _conv0_v[dev], _conv_v[dev], _v_star[dev], _dom[dev], dt, dt0); } extern "C" void cuda_wstar_rhs(int dev) { int threads_x = 0; int threads_y = 0; int blocks_x = 0; int blocks_y = 0; if(dom[dev].Gfz.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfz.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfz.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfz.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) (threads_y-2)); dim3 dimBlocks(threads_x, threads_y); dim3 numBlocks(blocks_x, blocks_y); hipLaunchKernelGGL(( wstar_rhs), dim3(numBlocks), dim3(dimBlocks), 0, 0, rho_f, nu, _u0[dev], _v0[dev], _w0[dev], _p0[dev], _f_z[dev], _conv0_w[dev], _conv_w[dev], _w_star[dev], _dom[dev], dt, dt0); } extern "C" void cuda_PP_rhs(int dev) { int threads_x = 0; int threads_y = 0; int blocks_x = 0; int blocks_y = 0; if(dom[dev].Gcc.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gcc.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gcc.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gcc.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) (threads_y-2)); dim3 dimBlocks(threads_x, threads_y); dim3 numBlocks(blocks_x, blocks_y); hipLaunchKernelGGL(( PP_rhs), dim3(numBlocks), dim3(dimBlocks), 0, 0, rho_f, _u_star[dev], _v_star[dev], _w_star[dev], _rhs_p[dev], _dom[dev], dt); } extern "C" void cuda_div_U_launch(int dev, real *u, real *v, real *w, real *divU) { int threads_x = 0; int threads_y = 0; int blocks_x = 0; int blocks_y = 0; if(dom[dev].Gcc.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gcc.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gcc.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gcc.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) (threads_y-2)); dim3 dimBlocks(threads_x, threads_y); dim3 numBlocks(blocks_x, blocks_y); hipLaunchKernelGGL(( div_U), dim3(numBlocks), dim3(dimBlocks), 0, 0, u, v, w, divU, _dom[dev]); }
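All of the solvers in this file (the three Helmholtz solves and the pressure-Poisson solve) share one CUSP pattern: assemble a dia_matrix, normalize the right-hand side, run a diagonally preconditioned Krylov solve under a cusp::monitor, then rescale the solution. A minimal standalone sketch of that pattern follows; it is illustrative only, and it assumes cusp::gallery::poisson5pt is available to stand in for the operator that the coefficient kernels above assemble (a csr_matrix is used here purely for brevity).

#include <cusp/csr_matrix.h>
#include <cusp/array1d.h>
#include <cusp/blas/blas.h>
#include <cusp/monitor.h>
#include <cusp/precond/diagonal.h>
#include <cusp/krylov/cg.h>
#include <cusp/gallery/poisson.h>

int main() {
  // stand-in for the assembled Helmholtz / pressure-Poisson operator
  cusp::csr_matrix<int, double, cusp::device_memory> A;
  cusp::gallery::poisson5pt(A, 64, 64);

  cusp::array1d<double, cusp::device_memory> x(A.num_rows, 0.);
  cusp::array1d<double, cusp::device_memory> b(A.num_rows, 1.);

  // normalize -> solve -> unnormalize, as in the routines above
  double norm = cusp::blas::nrm2(b);
  if (norm == 0.) norm = 1.;
  cusp::blas::scal(b, 1. / norm);

  cusp::monitor<double> monitor(b, 1000, 1e-8);              // (rhs, max_iter, tolerance)
  cusp::precond::diagonal<double, cusp::device_memory> M(A); // Jacobi preconditioner
  cusp::krylov::cg(A, x, b, monitor, M);

  cusp::blas::scal(x, norm);
  return monitor.converged() ? 0 : 1;
}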
7046aaa909443d241e800fbfc1496ee0241614ea.cu
/******************************************************************************* ********************************* BLUEBOTTLE ********************************** ******************************************************************************* * * Copyright 2012 - 2016 Adam Sierakowski, The Johns Hopkins University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Please contact the Johns Hopkins University to use Bluebottle for * commercial and/or for-profit applications. ******************************************************************************/ #include <cuda.h> #include <cusp/array1d.h> #include <cusp/blas/blas.h> #include <cusp/dia_matrix.h> #include <cusp/monitor.h> #include <cusp/precond/diagonal.h> #include <cusp/krylov/bicgstab.h> #include <cusp/krylov/cg.h> #include <cusp/print.h> #include <thrust/device_ptr.h> #include "cuda_bicgstab.h" #include "cuda_bluebottle.h" //#include "entrySearch.h" #include "cuda_particle.h" #ifdef TEST #include "cuda_testing.h" #endif extern "C" void cuda_ustar_helmholtz(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); cudaSetDevice(dev + dev_start); // write right-hand side cuda_ustar_rhs(dev); cuda_dom_BC_star(); if(nparts > 0) { cuda_part_BC_star(); } // create temporary U* without ghost cells for bicgstab result cusp::array1d<real, cusp::device_memory> ustar_tmp(dom[dev].Gfx.s3, 0.); // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_ustar; _A_ustar = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gfx._s3, dom[dev].Gfx._s3, 0, 13); // set up the coefficient matrix _A_ustar->diagonal_offsets[0] = -dom[dev].Gfx._s3 + dom[dev].Gfx._s2; _A_ustar->diagonal_offsets[1] = -dom[dev].Gfx._s2; _A_ustar->diagonal_offsets[2] = -dom[dev].Gfx._s2 + dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[3] = -dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[4] = -dom[dev].Gfx._s1 + 2; _A_ustar->diagonal_offsets[5] = -1; _A_ustar->diagonal_offsets[6] = 0; _A_ustar->diagonal_offsets[7] = 1; _A_ustar->diagonal_offsets[8] = dom[dev].Gfx._s1 - 2; _A_ustar->diagonal_offsets[9] = dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[10] = dom[dev].Gfx._s2 - dom[dev].Gfx._s1; _A_ustar->diagonal_offsets[11] = dom[dev].Gfx._s2; _A_ustar->diagonal_offsets[12] = dom[dev].Gfx._s3 - dom[dev].Gfx._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfx._in < MAX_THREADS_DIM) threads_x = dom[dev].Gfx._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfx._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gfx._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfx._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gfx._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfx._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gfx._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gfx._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 
numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // create temporary ustar without ghost cells real *_ustar_noghost; (cudaMalloc((void**) &_ustar_noghost, sizeof(real) * dom[dev].Gfx.s3)); // copy u_star into noghost structure for Helmholtz right-hand side copy_u_noghost<<<numBlocks_x, dimBlocks_x>>>(_ustar_noghost, _u_star[dev], _dom[dev]); // build pressure-Poisson coefficient matrix ustar_coeffs_init<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); ustar_coeffs<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0]), _flag_u[dev], _flag_v[dev], _flag_w[dev]); /* cusp::dia_matrix<int, real, cusp::host_memory> AA = *_A_ustar; printf("\n"); for(int i = 0; i < dom[dev].Gfx.s3; i++) { for(int j = 0; j < dom[dev].Gfx.s3; j++) { if(j == AA.diagonal_offsets[0] + i) if(AA.values(i, 0) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 0)); else if(j == AA.diagonal_offsets[1] + i) if(AA.values(i, 1) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 1)); else if(j == AA.diagonal_offsets[2] + i) if(AA.values(i, 2) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 2)); else if(j == AA.diagonal_offsets[3] + i) if(AA.values(i, 3) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 3)); else if(j == AA.diagonal_offsets[4] + i) if(AA.values(i, 4) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 4)); else if(j == AA.diagonal_offsets[5] + i) if(AA.values(i, 5) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 5)); else if(j == AA.diagonal_offsets[6] + i) if(AA.values(i, 6) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 6)); else if(j == AA.diagonal_offsets[7] + i) if(AA.values(i, 7) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 7)); else if(j == AA.diagonal_offsets[8] + i) if(AA.values(i, 8) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 8)); else if(j == AA.diagonal_offsets[9] + i) if(AA.values(i, 9) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 9)); else if(j == AA.diagonal_offsets[10] + i) if(AA.values(i, 10) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 10)); else if(j == AA.diagonal_offsets[11] + i) if(AA.values(i, 11) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 11)); else if(j == AA.diagonal_offsets[12] + i) if(AA.values(i, 12) == 0) printf(" "); else printf("%3.0f ", AA.values(i, 12)); else printf(" "); } printf("\n"); } */ // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_ustar(_ustar_noghost); cusp::array1d<real, cusp::device_memory> *_ustar_rhs; _ustar_rhs = new cusp::array1d<real, cusp::device_memory>(_ptr_ustar, _ptr_ustar + dom[dev].Gfx._s3); // account for boundary conditions if(bc.uW == PERIODIC) ustar_coeffs_periodic_W<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uW == DIRICHLET) ustar_coeffs_dirichlet_W<<<numBlocks_x, dimBlocks_x>>>(bc.uWD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uE == PERIODIC) ustar_coeffs_periodic_E<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uE == DIRICHLET) ustar_coeffs_dirichlet_E<<<numBlocks_x, dimBlocks_x>>>(bc.uED, _u_star[dev], 
_dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uS == PERIODIC) ustar_coeffs_periodic_S<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uS == DIRICHLET) ustar_coeffs_dirichlet_S<<<numBlocks_y, dimBlocks_y>>>(bc.uSD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uN == PERIODIC) ustar_coeffs_periodic_N<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uN == DIRICHLET) ustar_coeffs_dirichlet_N<<<numBlocks_y, dimBlocks_y>>>(bc.uND, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uB == PERIODIC) ustar_coeffs_periodic_B<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uB == DIRICHLET) ustar_coeffs_dirichlet_B<<<numBlocks_z, dimBlocks_z>>>(bc.uBD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); if(bc.uT == PERIODIC) ustar_coeffs_periodic_T<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); else if(bc.uT == DIRICHLET) ustar_coeffs_dirichlet_T<<<numBlocks_z, dimBlocks_z>>>(bc.uTD, _u_star[dev], _dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0])); ustar_coeffs_particles<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _A_ustar->values.pitch, thrust::raw_pointer_cast(&_A_ustar->values.values[0]), _flag_u[dev]); // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_ustar_rhs); if(norm == 0) norm = 1.; cusp::blas::scal(*_ustar_rhs, 1. / norm); // call BiCGSTAB to solve for ustar_tmp cusp::monitor<real> monitor(*_ustar_rhs, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_ustar); //cusp::krylov::bicgstab(*_A_ustar, ustar_tmp, *_ustar_rhs, monitor, M); cusp::krylov::cg(*_A_ustar, ustar_tmp, *_ustar_rhs, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_helmholtz_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_helmholtz_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The u_star Helmholtz equation did not converge. 
\n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(ustar_tmp, norm); // copy solution back to _u_star copy_u_ghost<<<numBlocks_x, dimBlocks_x>>>(_u_star[dev], thrust::raw_pointer_cast(ustar_tmp.data()), _dom[dev]); // clean up delete(_ustar_rhs); delete(_A_ustar); (cudaFree(_ustar_noghost)); #ifdef TEST // copy _u_star to _u cudaMemcpy(_u[dev], _u_star[dev], dom[dev].Gfx.s3b * sizeof(real), cudaMemcpyDeviceToDevice); #endif } } extern "C" void cuda_vstar_helmholtz(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); cudaSetDevice(dev + dev_start); // write right-hand side cuda_vstar_rhs(dev); cuda_dom_BC_star(); if(nparts > 0) { cuda_part_BC_star(); } // create temporary U* without ghost cells for bicgstab result cusp::array1d<real, cusp::device_memory> vstar_tmp(dom[dev].Gfy.s3, 0.); // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_vstar; _A_vstar = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gfy._s3, dom[dev].Gfy._s3, 0, 13); // set up the coefficient matrix _A_vstar->diagonal_offsets[0] = -dom[dev].Gfy._s3 + dom[dev].Gfy._s2; _A_vstar->diagonal_offsets[1] = -dom[dev].Gfy._s2; _A_vstar->diagonal_offsets[2] = -dom[dev].Gfy._s2 + 2*dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[3] = -dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[4] = -dom[dev].Gfy._s1 + 1; _A_vstar->diagonal_offsets[5] = -1; _A_vstar->diagonal_offsets[6] = 0; _A_vstar->diagonal_offsets[7] = 1; _A_vstar->diagonal_offsets[8] = dom[dev].Gfy._s1 - 1; _A_vstar->diagonal_offsets[9] = dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[10] = dom[dev].Gfy._s2 - 2*dom[dev].Gfy._s1; _A_vstar->diagonal_offsets[11] = dom[dev].Gfy._s2; _A_vstar->diagonal_offsets[12] = dom[dev].Gfy._s3 - dom[dev].Gfy._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfy._in < MAX_THREADS_DIM) threads_x = dom[dev].Gfy._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfy._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gfy._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfy._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gfy._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfy._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gfy._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gfy._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // create temporary ustar without ghost cells real *_vstar_noghost; (cudaMalloc((void**) &_vstar_noghost, sizeof(real) * dom[dev].Gfy.s3)); // copy v_star into noghost structure for Helmholtz right-hand side copy_v_noghost<<<numBlocks_y, dimBlocks_y>>>(_vstar_noghost, _v_star[dev], _dom[dev]); // build pressure-Poisson coefficient matrix vstar_coeffs_init<<<numBlocks_y, dimBlocks_y>>>(_dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); vstar_coeffs<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0]), _flag_u[dev], _flag_v[dev], _flag_w[dev]); // account for boundary conditions if(bc.vW == PERIODIC) vstar_coeffs_periodic_W<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, 
thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vW == DIRICHLET) vstar_coeffs_dirichlet_W<<<numBlocks_x, dimBlocks_x>>>(bc.vWD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vE == PERIODIC) vstar_coeffs_periodic_E<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vE == DIRICHLET) vstar_coeffs_dirichlet_E<<<numBlocks_x, dimBlocks_x>>>(bc.vED, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vS == PERIODIC) vstar_coeffs_periodic_S<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vS == DIRICHLET) vstar_coeffs_dirichlet_S<<<numBlocks_y, dimBlocks_y>>>(bc.vSD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vN == PERIODIC) vstar_coeffs_periodic_N<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vN == DIRICHLET) vstar_coeffs_dirichlet_N<<<numBlocks_y, dimBlocks_y>>>(bc.vND, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vB == PERIODIC) vstar_coeffs_periodic_B<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vB == DIRICHLET) vstar_coeffs_dirichlet_B<<<numBlocks_z, dimBlocks_z>>>(bc.vBD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); if(bc.vT == PERIODIC) vstar_coeffs_periodic_T<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); else if(bc.vT == DIRICHLET) vstar_coeffs_dirichlet_T<<<numBlocks_z, dimBlocks_z>>>(bc.vTD, _v_star[dev], _dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0])); vstar_coeffs_particles<<<numBlocks_y, dimBlocks_y>>>(_dom[dev], _A_vstar->values.pitch, thrust::raw_pointer_cast(&_A_vstar->values.values[0]), _flag_v[dev]); // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_vstar(_vstar_noghost); cusp::array1d<real, cusp::device_memory> *_vstar_rhs; _vstar_rhs = new cusp::array1d<real, cusp::device_memory>(_ptr_vstar, _ptr_vstar + dom[dev].Gfy._s3); // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_vstar_rhs); if(norm == 0) norm = 1.; cusp::blas::scal(*_vstar_rhs, 1. / norm); // call BiCGSTAB to solve for ustar_tmp cusp::monitor<real> monitor(*_vstar_rhs, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_vstar); //cusp::krylov::bicgstab(*_A_vstar, vstar_tmp, *_vstar_rhs, monitor, M); cusp::krylov::cg(*_A_vstar, vstar_tmp, *_vstar_rhs, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_helmholtz_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_helmholtz_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The v_star Helmholtz equation did not converge. 
\n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(vstar_tmp, norm); // copy solution back to _v_star field copy_v_ghost<<<numBlocks_y, dimBlocks_y>>>(_v_star[dev], thrust::raw_pointer_cast(vstar_tmp.data()), _dom[dev]); // clean up delete(_vstar_rhs); delete(_A_vstar); (cudaFree(_vstar_noghost)); #ifdef TEST // copy _v_star to _v cudaMemcpy(_v[dev], _v_star[dev], dom[dev].Gfy.s3b * sizeof(real), cudaMemcpyDeviceToDevice); #endif } } extern "C" void cuda_wstar_helmholtz(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); cudaSetDevice(dev + dev_start); // write right-hand side cuda_wstar_rhs(dev); cuda_dom_BC_star(); if(nparts > 0) { cuda_part_BC_star(); } // create temporary U* without ghost cells for bicgstab result cusp::array1d<real, cusp::device_memory> wstar_tmp(dom[dev].Gfz.s3, 0.); // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_wstar; _A_wstar = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gfz._s3, dom[dev].Gfz._s3, 0, 13); // set up the coefficient matrix _A_wstar->diagonal_offsets[0] = -dom[dev].Gfz._s3 + 2*dom[dev].Gfz._s2; _A_wstar->diagonal_offsets[1] = -dom[dev].Gfz._s2; _A_wstar->diagonal_offsets[2] = -dom[dev].Gfz._s2 + dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[3] = -dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[4] = -dom[dev].Gfz._s1 + 1; _A_wstar->diagonal_offsets[5] = -1; _A_wstar->diagonal_offsets[6] = 0; _A_wstar->diagonal_offsets[7] = 1; _A_wstar->diagonal_offsets[8] = dom[dev].Gfz._s1 - 1; _A_wstar->diagonal_offsets[9] = dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[10] = dom[dev].Gfz._s2 - dom[dev].Gfz._s1; _A_wstar->diagonal_offsets[11] = dom[dev].Gfz._s2; _A_wstar->diagonal_offsets[12] = dom[dev].Gfz._s3 - 2*dom[dev].Gfz._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfz._in < MAX_THREADS_DIM) threads_x = dom[dev].Gfz._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfz._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gfz._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfz._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gfz._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfz._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gfz._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gfz._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // create temporary ustar without ghost cells real *_wstar_noghost; (cudaMalloc((void**) &_wstar_noghost, sizeof(real) * dom[dev].Gfz.s3)); // copy w_star into noghost structure for Helmholtz right-hand side copy_w_noghost<<<numBlocks_z, dimBlocks_z>>>(_wstar_noghost, _w_star[dev], _dom[dev]); // build pressure-Poisson coefficient matrix wstar_coeffs_init<<<numBlocks_z, dimBlocks_z>>>(_dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); wstar_coeffs<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0]), _flag_u[dev], _flag_v[dev], _flag_w[dev]); // account for boundary conditions if(bc.wW == PERIODIC) wstar_coeffs_periodic_W<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, 
thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wW == DIRICHLET) wstar_coeffs_dirichlet_W<<<numBlocks_x, dimBlocks_x>>>(bc.wWD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wE == PERIODIC) wstar_coeffs_periodic_E<<<numBlocks_x, dimBlocks_x>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wE == DIRICHLET) wstar_coeffs_dirichlet_E<<<numBlocks_x, dimBlocks_x>>>(bc.wED, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wS == PERIODIC) wstar_coeffs_periodic_S<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wS == DIRICHLET) wstar_coeffs_dirichlet_S<<<numBlocks_y, dimBlocks_y>>>(bc.wSD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wN == PERIODIC) wstar_coeffs_periodic_N<<<numBlocks_y, dimBlocks_y>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wN == DIRICHLET) wstar_coeffs_dirichlet_N<<<numBlocks_y, dimBlocks_y>>>(bc.wND, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wB == PERIODIC) wstar_coeffs_periodic_B<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wB == DIRICHLET) wstar_coeffs_dirichlet_B<<<numBlocks_z, dimBlocks_z>>>(bc.wBD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); if(bc.wT == PERIODIC) wstar_coeffs_periodic_T<<<numBlocks_z, dimBlocks_z>>>(nu, dt, _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); else if(bc.wT == DIRICHLET) wstar_coeffs_dirichlet_T<<<numBlocks_z, dimBlocks_z>>>(bc.wTD, _w_star[dev], _dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0])); wstar_coeffs_particles<<<numBlocks_z, dimBlocks_z>>>(_dom[dev], _A_wstar->values.pitch, thrust::raw_pointer_cast(&_A_wstar->values.values[0]), _flag_w[dev]); // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_wstar(_wstar_noghost); cusp::array1d<real, cusp::device_memory> *_wstar_rhs; _wstar_rhs = new cusp::array1d<real, cusp::device_memory>(_ptr_wstar, _ptr_wstar + dom[dev].Gfz.s3); // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_wstar_rhs); if(norm == 0) norm = 1.; cusp::blas::scal(*_wstar_rhs, 1. / norm); // call BiCGSTAB to solve for ustar_tmp cusp::monitor<real> monitor(*_wstar_rhs, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_wstar); //cusp::krylov::bicgstab(*_A_wstar, wstar_tmp, *_wstar_rhs, monitor, M); cusp::krylov::cg(*_A_wstar, wstar_tmp, *_wstar_rhs, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_helmholtz_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_helmholtz_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The w_star Helmholtz equation did not converge. 
\n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(wstar_tmp, norm); // copy solution back to _v_star field copy_w_ghost<<<numBlocks_z, dimBlocks_z>>>(_w_star[dev], thrust::raw_pointer_cast(wstar_tmp.data()), _dom[dev]); // clean up delete(_wstar_rhs); delete(_A_wstar); (cudaFree(_wstar_noghost)); #ifdef TEST // copy _w_star to _w cudaMemcpy(_w[dev], _w_star[dev], dom[dev].Gfz.s3b * sizeof(real), cudaMemcpyDeviceToDevice); #endif } } extern "C" void cuda_PP_bicgstab(int rank) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); cudaSetDevice(dev + dev_start); // write right-hand side cuda_PP_rhs(dev); if(nparts > 0) { cuda_part_BC_p(dev); } // create CUSP diagonal matrix for p cusp::dia_matrix<int, real, cusp::device_memory> *_A_p; _A_p = new cusp::dia_matrix<int, real, cusp::device_memory> (dom[dev].Gcc._s3, dom[dev].Gcc._s3, 0, 13); // set up the coefficient matrix _A_p->diagonal_offsets[0] = -dom[dev].Gcc._s3 + dom[dev].Gcc._s2; _A_p->diagonal_offsets[1] = -dom[dev].Gcc._s2; _A_p->diagonal_offsets[2] = -dom[dev].Gcc._s2 + dom[dev].Gcc._s1; _A_p->diagonal_offsets[3] = -dom[dev].Gcc._s1; _A_p->diagonal_offsets[4] = -dom[dev].Gcc._s1 + 1; _A_p->diagonal_offsets[5] = -1; _A_p->diagonal_offsets[6] = 0; _A_p->diagonal_offsets[7] = 1; _A_p->diagonal_offsets[8] = dom[dev].Gcc._s1 - 1; _A_p->diagonal_offsets[9] = dom[dev].Gcc._s1; _A_p->diagonal_offsets[10] = dom[dev].Gcc._s2 - dom[dev].Gcc._s1; _A_p->diagonal_offsets[11] = dom[dev].Gcc._s2; _A_p->diagonal_offsets[12] = dom[dev].Gcc._s3 - dom[dev].Gcc._s2; // write coefficients using kernel int threads_x = 0; int threads_y = 0; int threads_z = 0; int blocks_x = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gcc._in < MAX_THREADS_DIM) threads_x = dom[dev].Gcc._in; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gcc._jn < MAX_THREADS_DIM) threads_y = dom[dev].Gcc._jn; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gcc._kn < MAX_THREADS_DIM) threads_z = dom[dev].Gcc._kn; else threads_z = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gcc._in / (real) threads_x); blocks_y = (int)ceil((real) dom[dev].Gcc._jn / (real) threads_y); blocks_z = (int)ceil((real) dom[dev].Gcc._kn / (real) threads_z); dim3 dimBlocks_x(threads_y, threads_z); dim3 numBlocks_x(blocks_y, blocks_z); dim3 dimBlocks_y(threads_z, threads_x); dim3 numBlocks_y(blocks_z, blocks_x); dim3 dimBlocks_z(threads_x, threads_y); dim3 numBlocks_z(blocks_x, blocks_y); // build pressure-Poisson coefficient matrix coeffs_init<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0])); coeffs<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _flag_u[dev], _flag_v[dev], _flag_w[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0])); if(bc.pW == PERIODIC) coeffs_periodic_W<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_u[dev]); if(bc.pE == PERIODIC) coeffs_periodic_E<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_u[dev]); if(bc.pS == PERIODIC) coeffs_periodic_S<<<numBlocks_y, dimBlocks_y>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_v[dev]); if(bc.pN == PERIODIC) coeffs_periodic_N<<<numBlocks_y, dimBlocks_y>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_v[dev]); if(bc.pB == PERIODIC) coeffs_periodic_B<<<numBlocks_z, 
dimBlocks_z>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_w[dev]); if(bc.pT == PERIODIC) coeffs_periodic_T<<<numBlocks_z, dimBlocks_z>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _flag_w[dev]); coeffs_particle<<<numBlocks_x, dimBlocks_x>>>(_dom[dev], _A_p->values.pitch, thrust::raw_pointer_cast(&_A_p->values.values[0]), _phase[dev]); /* cusp::dia_matrix<int, real, cusp::host_memory> AA = *_A_p; printf("\n"); for(int i = 0; i < dom[dev].Gcc.s3; i++) { for(int j = 0; j < dom[dev].Gcc.s3; j++) { if(j == AA.diagonal_offsets[0] + i) if(AA.values(i, 0) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 0)); else if(j == AA.diagonal_offsets[1] + i) if(AA.values(i, 1) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 1)); else if(j == AA.diagonal_offsets[2] + i) if(AA.values(i, 2) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 2)); else if(j == AA.diagonal_offsets[3] + i) if(AA.values(i, 3) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 3)); else if(j == AA.diagonal_offsets[4] + i) if(AA.values(i, 4) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 4)); else if(j == AA.diagonal_offsets[5] + i) if(AA.values(i, 5) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 5)); else if(j == AA.diagonal_offsets[6] + i) if(AA.values(i, 6) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 6)); else if(j == AA.diagonal_offsets[7] + i) if(AA.values(i, 7) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 7)); else if(j == AA.diagonal_offsets[8] + i) if(AA.values(i, 8) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 8)); else if(j == AA.diagonal_offsets[9] + i) if(AA.values(i, 9) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 9)); else if(j == AA.diagonal_offsets[10] + i) if(AA.values(i, 10) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 10)); else if(j == AA.diagonal_offsets[11] + i) if(AA.values(i, 11) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 11)); else if(j == AA.diagonal_offsets[12] + i) if(AA.values(i, 12) == 0) printf(" "); else printf("%1.0f ", AA.values(i, 12)); else printf(" "); } printf("\n"); } */ /* cusp::dia_matrix<int, real, cusp::host_memory> AA = *_A_p; FILE *mat = fopen("mat.txt", "w"); for(int i = 0; i < dom[dev].Gcc.s3; i++) { for(int j = 0; j < dom[dev].Gcc.s3; j++) { if(j == AA.diagonal_offsets[0] + i) if(AA.values(i, 0) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 0)); else if(j == AA.diagonal_offsets[1] + i) if(AA.values(i, 1) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 1)); else if(j == AA.diagonal_offsets[2] + i) if(AA.values(i, 2) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 2)); else if(j == AA.diagonal_offsets[3] + i) if(AA.values(i, 3) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 3)); else if(j == AA.diagonal_offsets[4] + i) if(AA.values(i, 4) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 4)); else if(j == AA.diagonal_offsets[5] + i) if(AA.values(i, 5) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 5)); else if(j == AA.diagonal_offsets[6] + i) if(AA.values(i, 6) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 6)); else if(j == AA.diagonal_offsets[7] + i) if(AA.values(i, 7) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 7)); else if(j == AA.diagonal_offsets[8] + i) if(AA.values(i, 8) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 8)); else if(j == 
AA.diagonal_offsets[9] + i) if(AA.values(i, 9) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 9)); else if(j == AA.diagonal_offsets[10] + i) if(AA.values(i, 10) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 10)); else if(j == AA.diagonal_offsets[11] + i) if(AA.values(i, 11) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 11)); else if(j == AA.diagonal_offsets[12] + i) if(AA.values(i, 12) == 0) fprintf(mat,"%f ", 0.); else fprintf(mat,"%f ", AA.values(i, 12)); else fprintf(mat,"%f ", 0.); } } fclose(mat); */ // copy p0 to array without ghost cells and use it as an initial guess and solution space real *_phinoghost; (cudaMalloc((void**) &_phinoghost, sizeof(real)*dom[dev].Gcc.s3)); copy_p_noghost<<<numBlocks_x, dimBlocks_x>>>(_phinoghost, _phi[dev], _dom[dev]); thrust::device_ptr<real> _ptr_p_sol(_phinoghost); cusp::array1d<real, cusp::device_memory> *_p_sol; _p_sol = new cusp::array1d<real, cusp::device_memory>(_ptr_p_sol, _ptr_p_sol + dom[dev].Gcc._s3); // create CUSP pointer to right-hand side thrust::device_ptr<real> _ptr_p(_rhs_p[dev]); cusp::array1d<real, cusp::device_memory> *_pp; _pp = new cusp::array1d<real, cusp::device_memory>(_ptr_p, _ptr_p + dom[dev].Gcc._s3); /* printf("%e\n", dt); printf("_p_sol_in\n"); cusp::print(*_p_sol); printf("_pp\n"); cusp::print(*_pp); */ // normalize the problem by the right-hand side before sending to CUSP real norm = cusp::blas::nrm2(*_pp); //printf("norm = %e\n", norm/dom[dev].Gcc.s3); //if(norm > 100.*pp_residual) {//== 0) if(norm == 0) norm = 1.; cusp::blas::scal(*_pp, 1. / norm); cusp::blas::scal(*_p_sol, 1. / norm); /*cusp::array1d<real, cusp::host_memory> PP = *_pp; cusp::blas::scal(PP, norm); //cusp::print(PP); real ppsum = 0.; for(int s = 0; s < dom[dev].Gcc.s3; s++) { ppsum += PP[s]*dom[dev].dx*dom[dev].dy*dom[dev].dz; } printf("PPSUM_1 = %e\n", ppsum*norm*dt/rho_f); */ // call BiCGSTAB to solve for p_sol cusp::monitor<real> monitor(*_pp, pp_max_iter, pp_residual); cusp::precond::diagonal<real, cusp::device_memory> M(*_A_p); cusp::krylov::bicgstab(*_A_p, *_p_sol, *_pp, monitor, M); // write convergence data to file if(rank == 0) { char nam[FILE_NAME_SIZE] = "solver_expd.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } else { char nam[FILE_NAME_SIZE] = "solver_prec.rec"; recorder_bicgstab(nam, monitor.iteration_count(), monitor.residual_norm()); } if(!monitor.converged()) { printf("The pressure-Poisson equation did not converge. 
\n"); exit(EXIT_FAILURE); } // unnormalize the solution cusp::blas::scal(*_p_sol, norm); // calculate average pressure //real p_avg = avg_entries(dom[dev].Gcc.s3, // thrust::raw_pointer_cast(_p_sol->data())); real p_avg = thrust::reduce(_p_sol->begin(), _p_sol->end(), (real) 0., thrust::plus<real>()) / dom[dev].Gcc.s3; // subtract average value from pressure cusp::array1d<real, cusp::device_memory> ones(dom[dev].Gcc.s3, 1.); cusp::blas::axpy(ones, *_p_sol, -p_avg); /* printf("_p_sol_out\n"); cusp::print(*_p_sol); */ // copy solution back to pressure field copy_p_ghost<<<numBlocks_x, dimBlocks_x>>>(_phi[dev], thrust::raw_pointer_cast(_p_sol->data()), _dom[dev]); // } else { // // write convergence data to file (solver did not run) // if(rank == 0) { // char nam[FILE_NAME_SIZE] = "solver_expd.rec"; // recorder_bicgstab(nam, 0.,0.); // } else { // char nam[FILE_NAME_SIZE] = "solver_prec.rec"; // recorder_bicgstab(nam, 0.,0.); // } // } // clean up cudaFree(_phinoghost); delete(_p_sol); delete(_pp); delete(_A_p); } } /*extern "C" void cuda_div_U(void) { // CPU thread for multi-GPU #pragma omp parallel num_threads(nsubdom) { int dev = omp_get_thread_num(); cudaSetDevice(dev + dev_start); // write right-hand side cuda_div_U_launch(dev, _u[dev], _v[dev], _w[dev], _divU[dev]); } } */ extern "C" void cuda_ustar_rhs(int dev) { int threads_y = 0; int threads_z = 0; int blocks_y = 0; int blocks_z = 0; if(dom[dev].Gfx.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfx.jnb + 2; else threads_y = MAX_THREADS_DIM; if(dom[dev].Gfx.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfx.knb + 2; else threads_z = MAX_THREADS_DIM; blocks_y = (int)ceil((real) dom[dev].Gfx.jnb / (real) (threads_y-2)); blocks_z = (int)ceil((real) dom[dev].Gfx.knb / (real) (threads_z-2)); dim3 dimBlocks(threads_y, threads_z); dim3 numBlocks(blocks_y, blocks_z); ustar_rhs<<<numBlocks, dimBlocks>>>(rho_f, nu, _u0[dev], _v0[dev], _w0[dev], _p0[dev], _f_x[dev], _conv0_u[dev], _conv_u[dev], _u_star[dev], _dom[dev], dt, dt0); } extern "C" void cuda_vstar_rhs(int dev) { int threads_z = 0; int threads_x = 0; int blocks_z = 0; int blocks_x = 0; if(dom[dev].Gfy.knb < MAX_THREADS_DIM) threads_z = dom[dev].Gfx.knb + 2; else threads_z = MAX_THREADS_DIM; if(dom[dev].Gfy.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfx.inb + 2; else threads_x = MAX_THREADS_DIM; blocks_z = (int)ceil((real) dom[dev].Gfy.knb / (real) (threads_z-2)); blocks_x = (int)ceil((real) dom[dev].Gfy.inb / (real) (threads_x-2)); dim3 dimBlocks(threads_z, threads_x); dim3 numBlocks(blocks_z, blocks_x); vstar_rhs<<<numBlocks, dimBlocks>>>(rho_f, nu, _u0[dev], _v0[dev], _w0[dev], _p0[dev], _f_y[dev], _conv0_v[dev], _conv_v[dev], _v_star[dev], _dom[dev], dt, dt0); } extern "C" void cuda_wstar_rhs(int dev) { int threads_x = 0; int threads_y = 0; int blocks_x = 0; int blocks_y = 0; if(dom[dev].Gfz.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gfz.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gfz.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gfz.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gfz.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gfz.jnb / (real) (threads_y-2)); dim3 dimBlocks(threads_x, threads_y); dim3 numBlocks(blocks_x, blocks_y); wstar_rhs<<<numBlocks, dimBlocks>>>(rho_f, nu, _u0[dev], _v0[dev], _w0[dev], _p0[dev], _f_z[dev], _conv0_w[dev], _conv_w[dev], _w_star[dev], _dom[dev], dt, dt0); } extern "C" void cuda_PP_rhs(int dev) { int threads_x = 0; int threads_y = 0; int blocks_x = 0; int blocks_y = 0; if(dom[dev].Gcc.inb 
< MAX_THREADS_DIM) threads_x = dom[dev].Gcc.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gcc.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gcc.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) (threads_y-2)); dim3 dimBlocks(threads_x, threads_y); dim3 numBlocks(blocks_x, blocks_y); PP_rhs<<<numBlocks, dimBlocks>>>(rho_f, _u_star[dev], _v_star[dev], _w_star[dev], _rhs_p[dev], _dom[dev], dt); } extern "C" void cuda_div_U_launch(int dev, real *u, real *v, real *w, real *divU) { int threads_x = 0; int threads_y = 0; int blocks_x = 0; int blocks_y = 0; if(dom[dev].Gcc.inb < MAX_THREADS_DIM) threads_x = dom[dev].Gcc.inb + 2; else threads_x = MAX_THREADS_DIM; if(dom[dev].Gcc.jnb < MAX_THREADS_DIM) threads_y = dom[dev].Gcc.jnb + 2; else threads_y = MAX_THREADS_DIM; blocks_x = (int)ceil((real) dom[dev].Gcc.inb / (real) (threads_x-2)); blocks_y = (int)ceil((real) dom[dev].Gcc.jnb / (real) (threads_y-2)); dim3 dimBlocks(threads_x, threads_y); dim3 numBlocks(blocks_x, blocks_y); div_U<<<numBlocks, dimBlocks>>>(u, v, w, divU, _dom[dev]); }
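/* Editor's note (illustrative sketch, not part of the original file): the Helmholtz and
   pressure-Poisson solves above all follow the same CUSP pattern -- scale the right-hand
   side to unit norm, solve with a diagonally preconditioned Krylov method, then scale the
   solution back.  The self-contained example below shows only that pattern; the 5-point
   Poisson test matrix, vector sizes, and tolerances are placeholder assumptions, not
   bluebottle data. */
#include <cusp/csr_matrix.h>
#include <cusp/monitor.h>
#include <cusp/blas/blas.h>
#include <cusp/precond/diagonal.h>
#include <cusp/krylov/bicgstab.h>
#include <cusp/gallery/poisson.h>

static void solve_scaled_example(void)
{
  // assemble a small 2-D Poisson matrix as a stand-in for the coefficient matrix
  cusp::csr_matrix<int, float, cusp::device_memory> A;
  cusp::gallery::poisson5pt(A, 16, 16);

  cusp::array1d<float, cusp::device_memory> x(A.num_rows, 0.f);   // solution
  cusp::array1d<float, cusp::device_memory> b(A.num_rows, 1.f);   // right-hand side

  // normalize the problem by the right-hand side, as the solvers above do
  float norm = cusp::blas::nrm2(b);
  if(norm == 0) norm = 1.f;
  cusp::blas::scal(b, 1.f / norm);

  // diagonal (Jacobi) preconditioner + BiCGSTAB, mirroring cuda_PP_bicgstab
  cusp::monitor<float> monitor(b, 1000, 1e-6f);
  cusp::precond::diagonal<float, cusp::device_memory> M(A);
  cusp::krylov::bicgstab(A, x, b, monitor, M);

  // unnormalize the solution before using it
  cusp::blas::scal(x, norm);
}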
8b69a82ee6c8aeda9680c562eb89e2c6bb950443.hip
// !!! This is a file automatically generated by hipify!!! #include "SelectiveColor.h" #include "opencv2/cudev/util/saturate_cast.hpp" #include <hip/hip_runtime_api.h> #include <math_functions.h> #include <hip/device_functions.h> static __constant__ float one_lut[256] = { 1.0f }; static __constant__ float min_lut[256] = { -0.0f / 255.0f, -1.0f / 255.0f, -2.0f / 255.0f, -3.0f / 255.0f, -4.0f / 255.0f, -5.0f / 255.0f, -6.0f / 255.0f, -7.0f / 255.0f, -8.0f / 255.0f, -9.0f / 255.0f, -10.0f / 255.0f, -11.0f / 255.0f, -12.0f / 255.0f, -13.0f / 255.0f, -14.0f / 255.0f, -15.0f / 255.0f, -16.0f / 255.0f, -17.0f / 255.0f, -18.0f / 255.0f, -19.0f / 255.0f, -20.0f / 255.0f, -21.0f / 255.0f, -22.0f / 255.0f, -23.0f / 255.0f, -24.0f / 255.0f, -25.0f / 255.0f, -26.0f / 255.0f, -27.0f / 255.0f, -28.0f / 255.0f, -29.0f / 255.0f, -30.0f / 255.0f, -31.0f / 255.0f, -32.0f / 255.0f, -33.0f / 255.0f, -34.0f / 255.0f, -35.0f / 255.0f, -36.0f / 255.0f, -37.0f / 255.0f, -38.0f / 255.0f, -39.0f / 255.0f, -40.0f / 255.0f, -41.0f / 255.0f, -42.0f / 255.0f, -43.0f / 255.0f, -44.0f / 255.0f, -45.0f / 255.0f, -46.0f / 255.0f, -47.0f / 255.0f, -48.0f / 255.0f, -49.0f / 255.0f, -50.0f / 255.0f, -51.0f / 255.0f, -52.0f / 255.0f, -53.0f / 255.0f, -54.0f / 255.0f, -55.0f / 255.0f, -56.0f / 255.0f, -57.0f / 255.0f, -58.0f / 255.0f, -59.0f / 255.0f, -60.0f / 255.0f, -61.0f / 255.0f, -62.0f / 255.0f, -63.0f / 255.0f, -64.0f / 255.0f, -65.0f / 255.0f, -66.0f / 255.0f, -67.0f / 255.0f, -68.0f / 255.0f, -69.0f / 255.0f, -70.0f / 255.0f, -71.0f / 255.0f, -72.0f / 255.0f, -73.0f / 255.0f, -74.0f / 255.0f, -75.0f / 255.0f, -76.0f / 255.0f, -77.0f / 255.0f, -78.0f / 255.0f, -79.0f / 255.0f, -80.0f / 255.0f, -81.0f / 255.0f, -82.0f / 255.0f, -83.0f / 255.0f, -84.0f / 255.0f, -85.0f / 255.0f, -86.0f / 255.0f, -87.0f / 255.0f, -88.0f / 255.0f, -89.0f / 255.0f, -90.0f / 255.0f, -91.0f / 255.0f, -92.0f / 255.0f, -93.0f / 255.0f, -94.0f / 255.0f, -95.0f / 255.0f, -96.0f / 255.0f, -97.0f / 255.0f, -98.0f / 255.0f, -99.0f / 255.0f, -100.0f / 255.0f, -101.0f / 255.0f, -102.0f / 255.0f, -103.0f / 255.0f, -104.0f / 255.0f, -105.0f / 255.0f, -106.0f / 255.0f, -107.0f / 255.0f, -108.0f / 255.0f, -109.0f / 255.0f, -110.0f / 255.0f, -111.0f / 255.0f, -112.0f / 255.0f, -113.0f / 255.0f, -114.0f / 255.0f, -115.0f / 255.0f, -116.0f / 255.0f, -117.0f / 255.0f, -118.0f / 255.0f, -119.0f / 255.0f, -120.0f / 255.0f, -121.0f / 255.0f, -122.0f / 255.0f, -123.0f / 255.0f, -124.0f / 255.0f, -125.0f / 255.0f, -126.0f / 255.0f, -127.0f / 255.0f, -128.0f / 255.0f, -129.0f / 255.0f, -130.0f / 255.0f, -131.0f / 255.0f, -132.0f / 255.0f, -133.0f / 255.0f, -134.0f / 255.0f, -135.0f / 255.0f, -136.0f / 255.0f, -137.0f / 255.0f, -138.0f / 255.0f, -139.0f / 255.0f, -140.0f / 255.0f, -141.0f / 255.0f, -142.0f / 255.0f, -143.0f / 255.0f, -144.0f / 255.0f, -145.0f / 255.0f, -146.0f / 255.0f, -147.0f / 255.0f, -148.0f / 255.0f, -149.0f / 255.0f, -150.0f / 255.0f, -151.0f / 255.0f, -152.0f / 255.0f, -153.0f / 255.0f, -154.0f / 255.0f, -155.0f / 255.0f, -156.0f / 255.0f, -157.0f / 255.0f, -158.0f / 255.0f, -159.0f / 255.0f, -160.0f / 255.0f, -161.0f / 255.0f, -162.0f / 255.0f, -163.0f / 255.0f, -164.0f / 255.0f, -165.0f / 255.0f, -166.0f / 255.0f, -167.0f / 255.0f, -168.0f / 255.0f, -169.0f / 255.0f, -170.0f / 255.0f, -171.0f / 255.0f, -172.0f / 255.0f, -173.0f / 255.0f, -174.0f / 255.0f, -175.0f / 255.0f, -176.0f / 255.0f, -177.0f / 255.0f, -178.0f / 255.0f, -179.0f / 255.0f, -180.0f / 255.0f, -181.0f / 255.0f, -182.0f / 255.0f, -183.0f / 255.0f, -184.0f / 255.0f, -185.0f / 
255.0f, -186.0f / 255.0f, -187.0f / 255.0f, -188.0f / 255.0f, -189.0f / 255.0f, -190.0f / 255.0f, -191.0f / 255.0f, -192.0f / 255.0f, -193.0f / 255.0f, -194.0f / 255.0f, -195.0f / 255.0f, -196.0f / 255.0f, -197.0f / 255.0f, -198.0f / 255.0f, -199.0f / 255.0f, -200.0f / 255.0f, -201.0f / 255.0f, -202.0f / 255.0f, -203.0f / 255.0f, -204.0f / 255.0f, -205.0f / 255.0f, -206.0f / 255.0f, -207.0f / 255.0f, -208.0f / 255.0f, -209.0f / 255.0f, -210.0f / 255.0f, -211.0f / 255.0f, -212.0f / 255.0f, -213.0f / 255.0f, -214.0f / 255.0f, -215.0f / 255.0f, -216.0f / 255.0f, -217.0f / 255.0f, -218.0f / 255.0f, -219.0f / 255.0f, -220.0f / 255.0f, -221.0f / 255.0f, -222.0f / 255.0f, -223.0f / 255.0f, -224.0f / 255.0f, -225.0f / 255.0f, -226.0f / 255.0f, -227.0f / 255.0f, -228.0f / 255.0f, -229.0f / 255.0f, -230.0f / 255.0f, -231.0f / 255.0f, -232.0f / 255.0f, -233.0f / 255.0f, -234.0f / 255.0f, -235.0f / 255.0f, -236.0f / 255.0f, -237.0f / 255.0f, -238.0f / 255.0f, -239.0f / 255.0f, -240.0f / 255.0f, -241.0f / 255.0f, -242.0f / 255.0f, -243.0f / 255.0f, -244.0f / 255.0f, -245.0f / 255.0f, -246.0f / 255.0f, -247.0f / 255.0f, -248.0f / 255.0f, -249.0f / 255.0f, -250.0f / 255.0f, -251.0f / 255.0f, -252.0f / 255.0f, -253.0f / 255.0f, -254.0f / 255.0f, -255.0f / 255.0f }; static __constant__ float max_lut[256] = { 1.0f - 0.0f / 255.0f, 1.0f - 1.0f / 255.0f, 1.0f - 2.0f / 255.0f, 1.0f - 3.0f / 255.0f, 1.0f - 4.0f / 255.0f, 1.0f - 5.0f / 255.0f, 1.0f - 6.0f / 255.0f, 1.0f - 7.0f / 255.0f, 1.0f - 8.0f / 255.0f, 1.0f - 9.0f / 255.0f, 1.0f - 10.0f / 255.0f, 1.0f - 11.0f / 255.0f, 1.0f - 12.0f / 255.0f, 1.0f - 13.0f / 255.0f, 1.0f - 14.0f / 255.0f, 1.0f - 15.0f / 255.0f, 1.0f - 16.0f / 255.0f, 1.0f - 17.0f / 255.0f, 1.0f - 18.0f / 255.0f, 1.0f - 19.0f / 255.0f, 1.0f - 20.0f / 255.0f, 1.0f - 21.0f / 255.0f, 1.0f - 22.0f / 255.0f, 1.0f - 23.0f / 255.0f, 1.0f - 24.0f / 255.0f, 1.0f - 25.0f / 255.0f, 1.0f - 26.0f / 255.0f, 1.0f - 27.0f / 255.0f, 1.0f - 28.0f / 255.0f, 1.0f - 29.0f / 255.0f, 1.0f - 30.0f / 255.0f, 1.0f - 31.0f / 255.0f, 1.0f - 32.0f / 255.0f, 1.0f - 33.0f / 255.0f, 1.0f - 34.0f / 255.0f, 1.0f - 35.0f / 255.0f, 1.0f - 36.0f / 255.0f, 1.0f - 37.0f / 255.0f, 1.0f - 38.0f / 255.0f, 1.0f - 39.0f / 255.0f, 1.0f - 40.0f / 255.0f, 1.0f - 41.0f / 255.0f, 1.0f - 42.0f / 255.0f, 1.0f - 43.0f / 255.0f, 1.0f - 44.0f / 255.0f, 1.0f - 45.0f / 255.0f, 1.0f - 46.0f / 255.0f, 1.0f - 47.0f / 255.0f, 1.0f - 48.0f / 255.0f, 1.0f - 49.0f / 255.0f, 1.0f - 50.0f / 255.0f, 1.0f - 51.0f / 255.0f, 1.0f - 52.0f / 255.0f, 1.0f - 53.0f / 255.0f, 1.0f - 54.0f / 255.0f, 1.0f - 55.0f / 255.0f, 1.0f - 56.0f / 255.0f, 1.0f - 57.0f / 255.0f, 1.0f - 58.0f / 255.0f, 1.0f - 59.0f / 255.0f, 1.0f - 60.0f / 255.0f, 1.0f - 61.0f / 255.0f, 1.0f - 62.0f / 255.0f, 1.0f - 63.0f / 255.0f, 1.0f - 64.0f / 255.0f, 1.0f - 65.0f / 255.0f, 1.0f - 66.0f / 255.0f, 1.0f - 67.0f / 255.0f, 1.0f - 68.0f / 255.0f, 1.0f - 69.0f / 255.0f, 1.0f - 70.0f / 255.0f, 1.0f - 71.0f / 255.0f, 1.0f - 72.0f / 255.0f, 1.0f - 73.0f / 255.0f, 1.0f - 74.0f / 255.0f, 1.0f - 75.0f / 255.0f, 1.0f - 76.0f / 255.0f, 1.0f - 77.0f / 255.0f, 1.0f - 78.0f / 255.0f, 1.0f - 79.0f / 255.0f, 1.0f - 80.0f / 255.0f, 1.0f - 81.0f / 255.0f, 1.0f - 82.0f / 255.0f, 1.0f - 83.0f / 255.0f, 1.0f - 84.0f / 255.0f, 1.0f - 85.0f / 255.0f, 1.0f - 86.0f / 255.0f, 1.0f - 87.0f / 255.0f, 1.0f - 88.0f / 255.0f, 1.0f - 89.0f / 255.0f, 1.0f - 90.0f / 255.0f, 1.0f - 91.0f / 255.0f, 1.0f - 92.0f / 255.0f, 1.0f - 93.0f / 255.0f, 1.0f - 94.0f / 255.0f, 1.0f - 95.0f / 255.0f, 1.0f - 96.0f / 255.0f, 1.0f - 97.0f / 
255.0f, 1.0f - 98.0f / 255.0f, 1.0f - 99.0f / 255.0f, 1.0f - 100.0f / 255.0f, 1.0f - 101.0f / 255.0f, 1.0f - 102.0f / 255.0f, 1.0f - 103.0f / 255.0f, 1.0f - 104.0f / 255.0f, 1.0f - 105.0f / 255.0f, 1.0f - 106.0f / 255.0f, 1.0f - 107.0f / 255.0f, 1.0f - 108.0f / 255.0f, 1.0f - 109.0f / 255.0f, 1.0f - 110.0f / 255.0f, 1.0f - 111.0f / 255.0f, 1.0f - 112.0f / 255.0f, 1.0f - 113.0f / 255.0f, 1.0f - 114.0f / 255.0f, 1.0f - 115.0f / 255.0f, 1.0f - 116.0f / 255.0f, 1.0f - 117.0f / 255.0f, 1.0f - 118.0f / 255.0f, 1.0f - 119.0f / 255.0f, 1.0f - 120.0f / 255.0f, 1.0f - 121.0f / 255.0f, 1.0f - 122.0f / 255.0f, 1.0f - 123.0f / 255.0f, 1.0f - 124.0f / 255.0f, 1.0f - 125.0f / 255.0f, 1.0f - 126.0f / 255.0f, 1.0f - 127.0f / 255.0f, 1.0f - 128.0f / 255.0f, 1.0f - 129.0f / 255.0f, 1.0f - 130.0f / 255.0f, 1.0f - 131.0f / 255.0f, 1.0f - 132.0f / 255.0f, 1.0f - 133.0f / 255.0f, 1.0f - 134.0f / 255.0f, 1.0f - 135.0f / 255.0f, 1.0f - 136.0f / 255.0f, 1.0f - 137.0f / 255.0f, 1.0f - 138.0f / 255.0f, 1.0f - 139.0f / 255.0f, 1.0f - 140.0f / 255.0f, 1.0f - 141.0f / 255.0f, 1.0f - 142.0f / 255.0f, 1.0f - 143.0f / 255.0f, 1.0f - 144.0f / 255.0f, 1.0f - 145.0f / 255.0f, 1.0f - 146.0f / 255.0f, 1.0f - 147.0f / 255.0f, 1.0f - 148.0f / 255.0f, 1.0f - 149.0f / 255.0f, 1.0f - 150.0f / 255.0f, 1.0f - 151.0f / 255.0f, 1.0f - 152.0f / 255.0f, 1.0f - 153.0f / 255.0f, 1.0f - 154.0f / 255.0f, 1.0f - 155.0f / 255.0f, 1.0f - 156.0f / 255.0f, 1.0f - 157.0f / 255.0f, 1.0f - 158.0f / 255.0f, 1.0f - 159.0f / 255.0f, 1.0f - 160.0f / 255.0f, 1.0f - 161.0f / 255.0f, 1.0f - 162.0f / 255.0f, 1.0f - 163.0f / 255.0f, 1.0f - 164.0f / 255.0f, 1.0f - 165.0f / 255.0f, 1.0f - 166.0f / 255.0f, 1.0f - 167.0f / 255.0f, 1.0f - 168.0f / 255.0f, 1.0f - 169.0f / 255.0f, 1.0f - 170.0f / 255.0f, 1.0f - 171.0f / 255.0f, 1.0f - 172.0f / 255.0f, 1.0f - 173.0f / 255.0f, 1.0f - 174.0f / 255.0f, 1.0f - 175.0f / 255.0f, 1.0f - 176.0f / 255.0f, 1.0f - 177.0f / 255.0f, 1.0f - 178.0f / 255.0f, 1.0f - 179.0f / 255.0f, 1.0f - 180.0f / 255.0f, 1.0f - 181.0f / 255.0f, 1.0f - 182.0f / 255.0f, 1.0f - 183.0f / 255.0f, 1.0f - 184.0f / 255.0f, 1.0f - 185.0f / 255.0f, 1.0f - 186.0f / 255.0f, 1.0f - 187.0f / 255.0f, 1.0f - 188.0f / 255.0f, 1.0f - 189.0f / 255.0f, 1.0f - 190.0f / 255.0f, 1.0f - 191.0f / 255.0f, 1.0f - 192.0f / 255.0f, 1.0f - 193.0f / 255.0f, 1.0f - 194.0f / 255.0f, 1.0f - 195.0f / 255.0f, 1.0f - 196.0f / 255.0f, 1.0f - 197.0f / 255.0f, 1.0f - 198.0f / 255.0f, 1.0f - 199.0f / 255.0f, 1.0f - 200.0f / 255.0f, 1.0f - 201.0f / 255.0f, 1.0f - 202.0f / 255.0f, 1.0f - 203.0f / 255.0f, 1.0f - 204.0f / 255.0f, 1.0f - 205.0f / 255.0f, 1.0f - 206.0f / 255.0f, 1.0f - 207.0f / 255.0f, 1.0f - 208.0f / 255.0f, 1.0f - 209.0f / 255.0f, 1.0f - 210.0f / 255.0f, 1.0f - 211.0f / 255.0f, 1.0f - 212.0f / 255.0f, 1.0f - 213.0f / 255.0f, 1.0f - 214.0f / 255.0f, 1.0f - 215.0f / 255.0f, 1.0f - 216.0f / 255.0f, 1.0f - 217.0f / 255.0f, 1.0f - 218.0f / 255.0f, 1.0f - 219.0f / 255.0f, 1.0f - 220.0f / 255.0f, 1.0f - 221.0f / 255.0f, 1.0f - 222.0f / 255.0f, 1.0f - 223.0f / 255.0f, 1.0f - 224.0f / 255.0f, 1.0f - 225.0f / 255.0f, 1.0f - 226.0f / 255.0f, 1.0f - 227.0f / 255.0f, 1.0f - 228.0f / 255.0f, 1.0f - 229.0f / 255.0f, 1.0f - 230.0f / 255.0f, 1.0f - 231.0f / 255.0f, 1.0f - 232.0f / 255.0f, 1.0f - 233.0f / 255.0f, 1.0f - 234.0f / 255.0f, 1.0f - 235.0f / 255.0f, 1.0f - 236.0f / 255.0f, 1.0f - 237.0f / 255.0f, 1.0f - 238.0f / 255.0f, 1.0f - 239.0f / 255.0f, 1.0f - 240.0f / 255.0f, 1.0f - 241.0f / 255.0f, 1.0f - 242.0f / 255.0f, 1.0f - 243.0f / 255.0f, 1.0f - 244.0f / 255.0f, 1.0f - 245.0f / 
255.0f, 1.0f - 246.0f / 255.0f, 1.0f - 247.0f / 255.0f, 1.0f - 248.0f / 255.0f, 1.0f - 249.0f / 255.0f, 1.0f - 250.0f / 255.0f, 1.0f - 251.0f / 255.0f, 1.0f - 252.0f / 255.0f, 1.0f - 253.0f / 255.0f, 1.0f - 254.0f / 255.0f, 1.0f - 255.0f / 255.0f }; static __device__ void min_med_max(const unsigned char* color, unsigned char& min, unsigned char& med, unsigned char& max) { unsigned char temp = 0; min = color[0], med = color[1], max = color[2]; if (min > med) { temp = min; min = med; med = temp; } if (med > max) { temp = med; med = max; max = temp; } if (min > med) { temp = min; min = med; med = temp; } } static __device__ float adjust_component(int mode, unsigned char component_color, float omega, float adjust, float black) { static float* mode_lut[2] = { one_lut, max_lut }; (void)black; float result = (/*(-1.0f - adjust) * black*/0.0f - adjust) * mode_lut[mode][component_color]; if (result < min_lut[component_color]) { result = min_lut[component_color]; } else if (result > max_lut[component_color]) { result = max_lut[component_color]; } return result * omega; } static __global__ void cuda_adjust_color_kernel(unsigned char* dst, int dst_step, const unsigned char* src, int src_step, int width, int height, int channels, int mode, int component, float cyan, float magtenta, float yellow, float black) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { unsigned char* dst_color = dst + y * dst_step + x * channels; const unsigned char* src_color = src + y * src_step + x * channels; unsigned char min = 0, med = 0, max = 0; min_med_max(src_color, min, med, max); bool valid = false; float result[3] = { 0.0f }; float omega = 0.0f; switch (component) { case SelectiveColor::Component_Red: case SelectiveColor::Component_Green: case SelectiveColor::Component_Blue: { valid = (max == src_color[component]); omega = static_cast<float>(max) - static_cast<float>(med); break; } case SelectiveColor::Component_Cyan: case SelectiveColor::Component_Magtenta: case SelectiveColor::Component_Yellow: { valid = (min == src_color[component - 3]); omega = static_cast<float>(med) - static_cast<float>(min); break; } default: break; } if (valid) { result[SelectiveColor::Component_Red] = adjust_component(mode, src_color[SelectiveColor::Component_Red], omega, cyan, black); result[SelectiveColor::Component_Green] = adjust_component(mode, src_color[SelectiveColor::Component_Green], omega, magtenta, black); result[SelectiveColor::Component_Blue] = adjust_component(mode, src_color[SelectiveColor::Component_Blue], omega, yellow, black); } dst_color[SelectiveColor::Component_Red] = cv::cudev::saturate_cast<unsigned char>(src_color[SelectiveColor::Component_Red] + result[SelectiveColor::Component_Red]); dst_color[SelectiveColor::Component_Green] = cv::cudev::saturate_cast<unsigned char>(src_color[SelectiveColor::Component_Green] + result[SelectiveColor::Component_Green]); dst_color[SelectiveColor::Component_Blue] = cv::cudev::saturate_cast<unsigned char>(src_color[SelectiveColor::Component_Blue] + result[SelectiveColor::Component_Blue]); } } int cuda_adjust_color(cv::cuda::GpuMat& dst, const cv::cuda::GpuMat& src, int mode, int component, float cyan, float magtenta, float yellow, cv::cuda::Stream& stream) { dim3 block(32, 8, 1); dim3 grid((dst.cols + 31) / block.x, (dst.rows + 7) / block.y, 1); cuda_adjust_color_kernel << < grid, block, 0, static_cast<hipStream_t>(stream.cudaPtr()) >> > ( (unsigned char*)(dst.data), (int)(dst.step), (const 
unsigned char*)(src.data), (int)(src.step), src.cols, src.rows, src.channels(), mode, component, cyan, magtenta, yellow, 0.0f); return static_cast<int>(hipPeekAtLastError()); }
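/* Editor's note (illustrative sketch, not part of the original file): the only substantive
   differences between this hipified source and the CUDA version that follows are the runtime
   header, the error/stream types, and the kernel-launch syntax.  The dummy kernel below is an
   assumption used purely to show that launch mapping. */
#include <hip/hip_runtime.h>

__global__ void dummy_launch_demo(int* out)
{
  out[blockIdx.x * blockDim.x + threadIdx.x] = 1;   // trivial body, only the launch matters
}

static void launch_both_ways(int* d_out, hipStream_t stream)
{
  dim3 grid(4), block(256);

  // HIP form, as produced by hipify and used in the .hip file above:
  hipLaunchKernelGGL(dummy_launch_demo, grid, block, 0, stream, d_out);

  // CUDA form, as it appears in the .cu file below (shown for comparison only;
  // it compiles with nvcc rather than hipcc):
  //   dummy_launch_demo<<<grid, block, 0, stream>>>(d_out);
}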
8b69a82ee6c8aeda9680c562eb89e2c6bb950443.cu
#include "SelectiveColor.h" #include "opencv2/cudev/util/saturate_cast.hpp" #include <cuda_runtime_api.h> #include <math_functions.h> #include <device_functions.h> static __constant__ float one_lut[256] = { 1.0f }; static __constant__ float min_lut[256] = { -0.0f / 255.0f, -1.0f / 255.0f, -2.0f / 255.0f, -3.0f / 255.0f, -4.0f / 255.0f, -5.0f / 255.0f, -6.0f / 255.0f, -7.0f / 255.0f, -8.0f / 255.0f, -9.0f / 255.0f, -10.0f / 255.0f, -11.0f / 255.0f, -12.0f / 255.0f, -13.0f / 255.0f, -14.0f / 255.0f, -15.0f / 255.0f, -16.0f / 255.0f, -17.0f / 255.0f, -18.0f / 255.0f, -19.0f / 255.0f, -20.0f / 255.0f, -21.0f / 255.0f, -22.0f / 255.0f, -23.0f / 255.0f, -24.0f / 255.0f, -25.0f / 255.0f, -26.0f / 255.0f, -27.0f / 255.0f, -28.0f / 255.0f, -29.0f / 255.0f, -30.0f / 255.0f, -31.0f / 255.0f, -32.0f / 255.0f, -33.0f / 255.0f, -34.0f / 255.0f, -35.0f / 255.0f, -36.0f / 255.0f, -37.0f / 255.0f, -38.0f / 255.0f, -39.0f / 255.0f, -40.0f / 255.0f, -41.0f / 255.0f, -42.0f / 255.0f, -43.0f / 255.0f, -44.0f / 255.0f, -45.0f / 255.0f, -46.0f / 255.0f, -47.0f / 255.0f, -48.0f / 255.0f, -49.0f / 255.0f, -50.0f / 255.0f, -51.0f / 255.0f, -52.0f / 255.0f, -53.0f / 255.0f, -54.0f / 255.0f, -55.0f / 255.0f, -56.0f / 255.0f, -57.0f / 255.0f, -58.0f / 255.0f, -59.0f / 255.0f, -60.0f / 255.0f, -61.0f / 255.0f, -62.0f / 255.0f, -63.0f / 255.0f, -64.0f / 255.0f, -65.0f / 255.0f, -66.0f / 255.0f, -67.0f / 255.0f, -68.0f / 255.0f, -69.0f / 255.0f, -70.0f / 255.0f, -71.0f / 255.0f, -72.0f / 255.0f, -73.0f / 255.0f, -74.0f / 255.0f, -75.0f / 255.0f, -76.0f / 255.0f, -77.0f / 255.0f, -78.0f / 255.0f, -79.0f / 255.0f, -80.0f / 255.0f, -81.0f / 255.0f, -82.0f / 255.0f, -83.0f / 255.0f, -84.0f / 255.0f, -85.0f / 255.0f, -86.0f / 255.0f, -87.0f / 255.0f, -88.0f / 255.0f, -89.0f / 255.0f, -90.0f / 255.0f, -91.0f / 255.0f, -92.0f / 255.0f, -93.0f / 255.0f, -94.0f / 255.0f, -95.0f / 255.0f, -96.0f / 255.0f, -97.0f / 255.0f, -98.0f / 255.0f, -99.0f / 255.0f, -100.0f / 255.0f, -101.0f / 255.0f, -102.0f / 255.0f, -103.0f / 255.0f, -104.0f / 255.0f, -105.0f / 255.0f, -106.0f / 255.0f, -107.0f / 255.0f, -108.0f / 255.0f, -109.0f / 255.0f, -110.0f / 255.0f, -111.0f / 255.0f, -112.0f / 255.0f, -113.0f / 255.0f, -114.0f / 255.0f, -115.0f / 255.0f, -116.0f / 255.0f, -117.0f / 255.0f, -118.0f / 255.0f, -119.0f / 255.0f, -120.0f / 255.0f, -121.0f / 255.0f, -122.0f / 255.0f, -123.0f / 255.0f, -124.0f / 255.0f, -125.0f / 255.0f, -126.0f / 255.0f, -127.0f / 255.0f, -128.0f / 255.0f, -129.0f / 255.0f, -130.0f / 255.0f, -131.0f / 255.0f, -132.0f / 255.0f, -133.0f / 255.0f, -134.0f / 255.0f, -135.0f / 255.0f, -136.0f / 255.0f, -137.0f / 255.0f, -138.0f / 255.0f, -139.0f / 255.0f, -140.0f / 255.0f, -141.0f / 255.0f, -142.0f / 255.0f, -143.0f / 255.0f, -144.0f / 255.0f, -145.0f / 255.0f, -146.0f / 255.0f, -147.0f / 255.0f, -148.0f / 255.0f, -149.0f / 255.0f, -150.0f / 255.0f, -151.0f / 255.0f, -152.0f / 255.0f, -153.0f / 255.0f, -154.0f / 255.0f, -155.0f / 255.0f, -156.0f / 255.0f, -157.0f / 255.0f, -158.0f / 255.0f, -159.0f / 255.0f, -160.0f / 255.0f, -161.0f / 255.0f, -162.0f / 255.0f, -163.0f / 255.0f, -164.0f / 255.0f, -165.0f / 255.0f, -166.0f / 255.0f, -167.0f / 255.0f, -168.0f / 255.0f, -169.0f / 255.0f, -170.0f / 255.0f, -171.0f / 255.0f, -172.0f / 255.0f, -173.0f / 255.0f, -174.0f / 255.0f, -175.0f / 255.0f, -176.0f / 255.0f, -177.0f / 255.0f, -178.0f / 255.0f, -179.0f / 255.0f, -180.0f / 255.0f, -181.0f / 255.0f, -182.0f / 255.0f, -183.0f / 255.0f, -184.0f / 255.0f, -185.0f / 255.0f, -186.0f / 255.0f, -187.0f / 255.0f, -188.0f / 255.0f, 
-189.0f / 255.0f, -190.0f / 255.0f, -191.0f / 255.0f, -192.0f / 255.0f, -193.0f / 255.0f, -194.0f / 255.0f, -195.0f / 255.0f, -196.0f / 255.0f, -197.0f / 255.0f, -198.0f / 255.0f, -199.0f / 255.0f, -200.0f / 255.0f, -201.0f / 255.0f, -202.0f / 255.0f, -203.0f / 255.0f, -204.0f / 255.0f, -205.0f / 255.0f, -206.0f / 255.0f, -207.0f / 255.0f, -208.0f / 255.0f, -209.0f / 255.0f, -210.0f / 255.0f, -211.0f / 255.0f, -212.0f / 255.0f, -213.0f / 255.0f, -214.0f / 255.0f, -215.0f / 255.0f, -216.0f / 255.0f, -217.0f / 255.0f, -218.0f / 255.0f, -219.0f / 255.0f, -220.0f / 255.0f, -221.0f / 255.0f, -222.0f / 255.0f, -223.0f / 255.0f, -224.0f / 255.0f, -225.0f / 255.0f, -226.0f / 255.0f, -227.0f / 255.0f, -228.0f / 255.0f, -229.0f / 255.0f, -230.0f / 255.0f, -231.0f / 255.0f, -232.0f / 255.0f, -233.0f / 255.0f, -234.0f / 255.0f, -235.0f / 255.0f, -236.0f / 255.0f, -237.0f / 255.0f, -238.0f / 255.0f, -239.0f / 255.0f, -240.0f / 255.0f, -241.0f / 255.0f, -242.0f / 255.0f, -243.0f / 255.0f, -244.0f / 255.0f, -245.0f / 255.0f, -246.0f / 255.0f, -247.0f / 255.0f, -248.0f / 255.0f, -249.0f / 255.0f, -250.0f / 255.0f, -251.0f / 255.0f, -252.0f / 255.0f, -253.0f / 255.0f, -254.0f / 255.0f, -255.0f / 255.0f }; static __constant__ float max_lut[256] = { 1.0f - 0.0f / 255.0f, 1.0f - 1.0f / 255.0f, 1.0f - 2.0f / 255.0f, 1.0f - 3.0f / 255.0f, 1.0f - 4.0f / 255.0f, 1.0f - 5.0f / 255.0f, 1.0f - 6.0f / 255.0f, 1.0f - 7.0f / 255.0f, 1.0f - 8.0f / 255.0f, 1.0f - 9.0f / 255.0f, 1.0f - 10.0f / 255.0f, 1.0f - 11.0f / 255.0f, 1.0f - 12.0f / 255.0f, 1.0f - 13.0f / 255.0f, 1.0f - 14.0f / 255.0f, 1.0f - 15.0f / 255.0f, 1.0f - 16.0f / 255.0f, 1.0f - 17.0f / 255.0f, 1.0f - 18.0f / 255.0f, 1.0f - 19.0f / 255.0f, 1.0f - 20.0f / 255.0f, 1.0f - 21.0f / 255.0f, 1.0f - 22.0f / 255.0f, 1.0f - 23.0f / 255.0f, 1.0f - 24.0f / 255.0f, 1.0f - 25.0f / 255.0f, 1.0f - 26.0f / 255.0f, 1.0f - 27.0f / 255.0f, 1.0f - 28.0f / 255.0f, 1.0f - 29.0f / 255.0f, 1.0f - 30.0f / 255.0f, 1.0f - 31.0f / 255.0f, 1.0f - 32.0f / 255.0f, 1.0f - 33.0f / 255.0f, 1.0f - 34.0f / 255.0f, 1.0f - 35.0f / 255.0f, 1.0f - 36.0f / 255.0f, 1.0f - 37.0f / 255.0f, 1.0f - 38.0f / 255.0f, 1.0f - 39.0f / 255.0f, 1.0f - 40.0f / 255.0f, 1.0f - 41.0f / 255.0f, 1.0f - 42.0f / 255.0f, 1.0f - 43.0f / 255.0f, 1.0f - 44.0f / 255.0f, 1.0f - 45.0f / 255.0f, 1.0f - 46.0f / 255.0f, 1.0f - 47.0f / 255.0f, 1.0f - 48.0f / 255.0f, 1.0f - 49.0f / 255.0f, 1.0f - 50.0f / 255.0f, 1.0f - 51.0f / 255.0f, 1.0f - 52.0f / 255.0f, 1.0f - 53.0f / 255.0f, 1.0f - 54.0f / 255.0f, 1.0f - 55.0f / 255.0f, 1.0f - 56.0f / 255.0f, 1.0f - 57.0f / 255.0f, 1.0f - 58.0f / 255.0f, 1.0f - 59.0f / 255.0f, 1.0f - 60.0f / 255.0f, 1.0f - 61.0f / 255.0f, 1.0f - 62.0f / 255.0f, 1.0f - 63.0f / 255.0f, 1.0f - 64.0f / 255.0f, 1.0f - 65.0f / 255.0f, 1.0f - 66.0f / 255.0f, 1.0f - 67.0f / 255.0f, 1.0f - 68.0f / 255.0f, 1.0f - 69.0f / 255.0f, 1.0f - 70.0f / 255.0f, 1.0f - 71.0f / 255.0f, 1.0f - 72.0f / 255.0f, 1.0f - 73.0f / 255.0f, 1.0f - 74.0f / 255.0f, 1.0f - 75.0f / 255.0f, 1.0f - 76.0f / 255.0f, 1.0f - 77.0f / 255.0f, 1.0f - 78.0f / 255.0f, 1.0f - 79.0f / 255.0f, 1.0f - 80.0f / 255.0f, 1.0f - 81.0f / 255.0f, 1.0f - 82.0f / 255.0f, 1.0f - 83.0f / 255.0f, 1.0f - 84.0f / 255.0f, 1.0f - 85.0f / 255.0f, 1.0f - 86.0f / 255.0f, 1.0f - 87.0f / 255.0f, 1.0f - 88.0f / 255.0f, 1.0f - 89.0f / 255.0f, 1.0f - 90.0f / 255.0f, 1.0f - 91.0f / 255.0f, 1.0f - 92.0f / 255.0f, 1.0f - 93.0f / 255.0f, 1.0f - 94.0f / 255.0f, 1.0f - 95.0f / 255.0f, 1.0f - 96.0f / 255.0f, 1.0f - 97.0f / 255.0f, 1.0f - 98.0f / 255.0f, 1.0f - 99.0f / 255.0f, 1.0f - 100.0f 
/ 255.0f, 1.0f - 101.0f / 255.0f, 1.0f - 102.0f / 255.0f, 1.0f - 103.0f / 255.0f, 1.0f - 104.0f / 255.0f, 1.0f - 105.0f / 255.0f, 1.0f - 106.0f / 255.0f, 1.0f - 107.0f / 255.0f, 1.0f - 108.0f / 255.0f, 1.0f - 109.0f / 255.0f, 1.0f - 110.0f / 255.0f, 1.0f - 111.0f / 255.0f, 1.0f - 112.0f / 255.0f, 1.0f - 113.0f / 255.0f, 1.0f - 114.0f / 255.0f, 1.0f - 115.0f / 255.0f, 1.0f - 116.0f / 255.0f, 1.0f - 117.0f / 255.0f, 1.0f - 118.0f / 255.0f, 1.0f - 119.0f / 255.0f, 1.0f - 120.0f / 255.0f, 1.0f - 121.0f / 255.0f, 1.0f - 122.0f / 255.0f, 1.0f - 123.0f / 255.0f, 1.0f - 124.0f / 255.0f, 1.0f - 125.0f / 255.0f, 1.0f - 126.0f / 255.0f, 1.0f - 127.0f / 255.0f, 1.0f - 128.0f / 255.0f, 1.0f - 129.0f / 255.0f, 1.0f - 130.0f / 255.0f, 1.0f - 131.0f / 255.0f, 1.0f - 132.0f / 255.0f, 1.0f - 133.0f / 255.0f, 1.0f - 134.0f / 255.0f, 1.0f - 135.0f / 255.0f, 1.0f - 136.0f / 255.0f, 1.0f - 137.0f / 255.0f, 1.0f - 138.0f / 255.0f, 1.0f - 139.0f / 255.0f, 1.0f - 140.0f / 255.0f, 1.0f - 141.0f / 255.0f, 1.0f - 142.0f / 255.0f, 1.0f - 143.0f / 255.0f, 1.0f - 144.0f / 255.0f, 1.0f - 145.0f / 255.0f, 1.0f - 146.0f / 255.0f, 1.0f - 147.0f / 255.0f, 1.0f - 148.0f / 255.0f, 1.0f - 149.0f / 255.0f, 1.0f - 150.0f / 255.0f, 1.0f - 151.0f / 255.0f, 1.0f - 152.0f / 255.0f, 1.0f - 153.0f / 255.0f, 1.0f - 154.0f / 255.0f, 1.0f - 155.0f / 255.0f, 1.0f - 156.0f / 255.0f, 1.0f - 157.0f / 255.0f, 1.0f - 158.0f / 255.0f, 1.0f - 159.0f / 255.0f, 1.0f - 160.0f / 255.0f, 1.0f - 161.0f / 255.0f, 1.0f - 162.0f / 255.0f, 1.0f - 163.0f / 255.0f, 1.0f - 164.0f / 255.0f, 1.0f - 165.0f / 255.0f, 1.0f - 166.0f / 255.0f, 1.0f - 167.0f / 255.0f, 1.0f - 168.0f / 255.0f, 1.0f - 169.0f / 255.0f, 1.0f - 170.0f / 255.0f, 1.0f - 171.0f / 255.0f, 1.0f - 172.0f / 255.0f, 1.0f - 173.0f / 255.0f, 1.0f - 174.0f / 255.0f, 1.0f - 175.0f / 255.0f, 1.0f - 176.0f / 255.0f, 1.0f - 177.0f / 255.0f, 1.0f - 178.0f / 255.0f, 1.0f - 179.0f / 255.0f, 1.0f - 180.0f / 255.0f, 1.0f - 181.0f / 255.0f, 1.0f - 182.0f / 255.0f, 1.0f - 183.0f / 255.0f, 1.0f - 184.0f / 255.0f, 1.0f - 185.0f / 255.0f, 1.0f - 186.0f / 255.0f, 1.0f - 187.0f / 255.0f, 1.0f - 188.0f / 255.0f, 1.0f - 189.0f / 255.0f, 1.0f - 190.0f / 255.0f, 1.0f - 191.0f / 255.0f, 1.0f - 192.0f / 255.0f, 1.0f - 193.0f / 255.0f, 1.0f - 194.0f / 255.0f, 1.0f - 195.0f / 255.0f, 1.0f - 196.0f / 255.0f, 1.0f - 197.0f / 255.0f, 1.0f - 198.0f / 255.0f, 1.0f - 199.0f / 255.0f, 1.0f - 200.0f / 255.0f, 1.0f - 201.0f / 255.0f, 1.0f - 202.0f / 255.0f, 1.0f - 203.0f / 255.0f, 1.0f - 204.0f / 255.0f, 1.0f - 205.0f / 255.0f, 1.0f - 206.0f / 255.0f, 1.0f - 207.0f / 255.0f, 1.0f - 208.0f / 255.0f, 1.0f - 209.0f / 255.0f, 1.0f - 210.0f / 255.0f, 1.0f - 211.0f / 255.0f, 1.0f - 212.0f / 255.0f, 1.0f - 213.0f / 255.0f, 1.0f - 214.0f / 255.0f, 1.0f - 215.0f / 255.0f, 1.0f - 216.0f / 255.0f, 1.0f - 217.0f / 255.0f, 1.0f - 218.0f / 255.0f, 1.0f - 219.0f / 255.0f, 1.0f - 220.0f / 255.0f, 1.0f - 221.0f / 255.0f, 1.0f - 222.0f / 255.0f, 1.0f - 223.0f / 255.0f, 1.0f - 224.0f / 255.0f, 1.0f - 225.0f / 255.0f, 1.0f - 226.0f / 255.0f, 1.0f - 227.0f / 255.0f, 1.0f - 228.0f / 255.0f, 1.0f - 229.0f / 255.0f, 1.0f - 230.0f / 255.0f, 1.0f - 231.0f / 255.0f, 1.0f - 232.0f / 255.0f, 1.0f - 233.0f / 255.0f, 1.0f - 234.0f / 255.0f, 1.0f - 235.0f / 255.0f, 1.0f - 236.0f / 255.0f, 1.0f - 237.0f / 255.0f, 1.0f - 238.0f / 255.0f, 1.0f - 239.0f / 255.0f, 1.0f - 240.0f / 255.0f, 1.0f - 241.0f / 255.0f, 1.0f - 242.0f / 255.0f, 1.0f - 243.0f / 255.0f, 1.0f - 244.0f / 255.0f, 1.0f - 245.0f / 255.0f, 1.0f - 246.0f / 255.0f, 1.0f - 247.0f / 255.0f, 1.0f - 248.0f / 
255.0f, 1.0f - 249.0f / 255.0f, 1.0f - 250.0f / 255.0f, 1.0f - 251.0f / 255.0f, 1.0f - 252.0f / 255.0f, 1.0f - 253.0f / 255.0f, 1.0f - 254.0f / 255.0f, 1.0f - 255.0f / 255.0f }; static __device__ void min_med_max(const unsigned char* color, unsigned char& min, unsigned char& med, unsigned char& max) { unsigned char temp = 0; min = color[0], med = color[1], max = color[2]; if (min > med) { temp = min; min = med; med = temp; } if (med > max) { temp = med; med = max; max = temp; } if (min > med) { temp = min; min = med; med = temp; } } static __device__ float adjust_component(int mode, unsigned char component_color, float omega, float adjust, float black) { static float* mode_lut[2] = { one_lut, max_lut }; (void)black; float result = (/*(-1.0f - adjust) * black*/0.0f - adjust) * mode_lut[mode][component_color]; if (result < min_lut[component_color]) { result = min_lut[component_color]; } else if (result > max_lut[component_color]) { result = max_lut[component_color]; } return result * omega; } static __global__ void cuda_adjust_color_kernel(unsigned char* dst, int dst_step, const unsigned char* src, int src_step, int width, int height, int channels, int mode, int component, float cyan, float magtenta, float yellow, float black) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { unsigned char* dst_color = dst + y * dst_step + x * channels; const unsigned char* src_color = src + y * src_step + x * channels; unsigned char min = 0, med = 0, max = 0; min_med_max(src_color, min, med, max); bool valid = false; float result[3] = { 0.0f }; float omega = 0.0f; switch (component) { case SelectiveColor::Component_Red: case SelectiveColor::Component_Green: case SelectiveColor::Component_Blue: { valid = (max == src_color[component]); omega = static_cast<float>(max) - static_cast<float>(med); break; } case SelectiveColor::Component_Cyan: case SelectiveColor::Component_Magtenta: case SelectiveColor::Component_Yellow: { valid = (min == src_color[component - 3]); omega = static_cast<float>(med) - static_cast<float>(min); break; } default: break; } if (valid) { result[SelectiveColor::Component_Red] = adjust_component(mode, src_color[SelectiveColor::Component_Red], omega, cyan, black); result[SelectiveColor::Component_Green] = adjust_component(mode, src_color[SelectiveColor::Component_Green], omega, magtenta, black); result[SelectiveColor::Component_Blue] = adjust_component(mode, src_color[SelectiveColor::Component_Blue], omega, yellow, black); } dst_color[SelectiveColor::Component_Red] = cv::cudev::saturate_cast<unsigned char>(src_color[SelectiveColor::Component_Red] + result[SelectiveColor::Component_Red]); dst_color[SelectiveColor::Component_Green] = cv::cudev::saturate_cast<unsigned char>(src_color[SelectiveColor::Component_Green] + result[SelectiveColor::Component_Green]); dst_color[SelectiveColor::Component_Blue] = cv::cudev::saturate_cast<unsigned char>(src_color[SelectiveColor::Component_Blue] + result[SelectiveColor::Component_Blue]); } } int cuda_adjust_color(cv::cuda::GpuMat& dst, const cv::cuda::GpuMat& src, int mode, int component, float cyan, float magtenta, float yellow, cv::cuda::Stream& stream) { dim3 block(32, 8, 1); dim3 grid((dst.cols + 31) / block.x, (dst.rows + 7) / block.y, 1); cuda_adjust_color_kernel << < grid, block, 0, static_cast<cudaStream_t>(stream.cudaPtr()) >> > ( (unsigned char*)(dst.data), (int)(dst.step), (const unsigned char*)(src.data), (int)(src.step), src.cols, src.rows, 
src.channels(), mode, component, cyan, magtenta, yellow, 0.0f); return static_cast<int>(cudaPeekAtLastError()); }
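Editor's note: the literal table whose initializer ends above follows the single pattern 1.0f - i / 255.0f for i = 0..255. A minimal, hypothetical sketch of generating such a table at startup and uploading it to constant memory, rather than spelling out every literal; the symbol name d_inv_lut and the helper are illustrative and not part of the sources above, which keep their tables (one_lut, max_lut, min_lut, max_lut) as static device arrays.

// Hypothetical helper, not from the original file: build the 256-entry
// "1.0f - i / 255.0f" table on the host and copy it to constant memory once.
__constant__ float d_inv_lut[256];

static void upload_inverse_lut()
{
    float h_lut[256];
    for (int i = 0; i < 256; ++i)
        h_lut[i] = 1.0f - static_cast<float>(i) / 255.0f;
    cudaMemcpyToSymbol(d_inv_lut, h_lut, sizeof(h_lut));
}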
c75b88af74e0daa80297c69f9fc905ceacfa7dc1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "processDepth.h"

__global__ void GrayToRGB8(float* srcImage, uchar3* dstImage, int width, int height, float max_value)
{
    const int x_ = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int pixel = y * width + x_;

    if( x_ >= width )
        return;
    if( y >= height )
        return;

    float x = srcImage[pixel] / max_value;
    float r, g, b;
    x = x * 6;
    r = 0.0f;
    g = 0.0f;
    b = 0.0f;

    if ((0 <= x && x <= 1) || (5 <= x && x <= 6))
        r = 1.0f;
    else if (4 <= x && x <= 5)
        r = x - 4;
    else if (1 <= x && x <= 2)
        r = 1.0f - (x - 1);

    if (1 <= x && x <= 3)
        g = 1.0f;
    else if (0 <= x && x <= 1)
        g = x - 0;
    else if (3 <= x && x <= 4)
        g = 1.0f - (x - 3);

    if (3 <= x && x <= 5)
        b = 1.0f;
    else if (2 <= x && x <= 3)
        b = x - 2;
    else if (5 <= x && x <= 6)
        b = 1.0f - (x - 5);

    dstImage[pixel] = make_uchar3(b * 255, g * 255, r * 255);
}

hipError_t cudaGrayToRGB8(float* srcDev, uchar3* destDev, size_t width, size_t height, float max_value )
{
    if( !srcDev || !destDev )
        return hipErrorInvalidDevicePointer;

    const dim3 blockDim(8,8,1);
    const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y), 1);

    hipLaunchKernelGGL(( GrayToRGB8), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height, max_value );

    return CUDA(hipGetLastError());
}

__global__ void DivideByMaxValue(float* srcImage, float* dstImage, int width, int height, float max_value)
{
    const int x_ = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int pixel = y * width + x_;

    if( x_ >= width )
        return;
    if( y >= height )
        return;

    float x = srcImage[pixel] / max_value;
    dstImage[pixel] = x;
}

hipError_t cudaDivideByMaxValue(float* srcDev, float* destDev, size_t width, size_t height, float max_value )
{
    if( !srcDev || !destDev )
        return hipErrorInvalidDevicePointer;

    const dim3 blockDim(8,8,1);
    const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y), 1);

    hipLaunchKernelGGL(( DivideByMaxValue), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height, max_value );

    return CUDA(hipGetLastError());
}
c75b88af74e0daa80297c69f9fc905ceacfa7dc1.cu
#include "processDepth.h"

__global__ void GrayToRGB8(float* srcImage, uchar3* dstImage, int width, int height, float max_value)
{
    const int x_ = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int pixel = y * width + x_;

    if( x_ >= width )
        return;
    if( y >= height )
        return;

    float x = srcImage[pixel] / max_value;
    float r, g, b;
    x = x * 6;
    r = 0.0f;
    g = 0.0f;
    b = 0.0f;

    if ((0 <= x && x <= 1) || (5 <= x && x <= 6))
        r = 1.0f;
    else if (4 <= x && x <= 5)
        r = x - 4;
    else if (1 <= x && x <= 2)
        r = 1.0f - (x - 1);

    if (1 <= x && x <= 3)
        g = 1.0f;
    else if (0 <= x && x <= 1)
        g = x - 0;
    else if (3 <= x && x <= 4)
        g = 1.0f - (x - 3);

    if (3 <= x && x <= 5)
        b = 1.0f;
    else if (2 <= x && x <= 3)
        b = x - 2;
    else if (5 <= x && x <= 6)
        b = 1.0f - (x - 5);

    dstImage[pixel] = make_uchar3(b * 255, g * 255, r * 255);
}

cudaError_t cudaGrayToRGB8(float* srcDev, uchar3* destDev, size_t width, size_t height, float max_value )
{
    if( !srcDev || !destDev )
        return cudaErrorInvalidDevicePointer;

    const dim3 blockDim(8,8,1);
    const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y), 1);

    GrayToRGB8<<<gridDim, blockDim>>>( srcDev, destDev, width, height, max_value );

    return CUDA(cudaGetLastError());
}

__global__ void DivideByMaxValue(float* srcImage, float* dstImage, int width, int height, float max_value)
{
    const int x_ = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int pixel = y * width + x_;

    if( x_ >= width )
        return;
    if( y >= height )
        return;

    float x = srcImage[pixel] / max_value;
    dstImage[pixel] = x;
}

cudaError_t cudaDivideByMaxValue(float* srcDev, float* destDev, size_t width, size_t height, float max_value )
{
    if( !srcDev || !destDev )
        return cudaErrorInvalidDevicePointer;

    const dim3 blockDim(8,8,1);
    const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y), 1);

    DivideByMaxValue<<<gridDim, blockDim>>>( srcDev, destDev, width, height, max_value );

    return CUDA(cudaGetLastError());
}
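Editor's note: GrayToRGB8 maps a depth value, normalized by max_value, through a six-segment jet-style piecewise colormap. Below is a hedged sketch of a call site for the cudaGrayToRGB8 wrapper declared above; the buffer names, the 640x480 size, and the 10.0f depth range are illustrative, not taken from the original project.

// Illustrative call site only: colour-map a device depth buffer into RGB8.
#include <cstdio>
#include "processDepth.h"

void colorizeDepthExample()
{
    const size_t width = 640, height = 480;
    float*  d_depth = nullptr;
    uchar3* d_rgb   = nullptr;

    cudaMalloc(&d_depth, width * height * sizeof(float));
    cudaMalloc(&d_rgb,   width * height * sizeof(uchar3));
    // ... fill d_depth from a sensor or an earlier kernel ...

    cudaError_t err = cudaGrayToRGB8(d_depth, d_rgb, width, height, /*max_value=*/10.0f);
    if (err != cudaSuccess)
        printf("cudaGrayToRGB8 failed: %s\n", cudaGetErrorString(err));

    cudaFree(d_depth);
    cudaFree(d_rgb);
}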
286a9c1e5a931946aadb209433d12fe9f9f41526.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ge_gamma.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int fd = 1; const REAL *a = NULL; hipMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; hipMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ge_gamma), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ge_gamma), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ge_gamma), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
286a9c1e5a931946aadb209433d12fe9f9f41526.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ge_gamma.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int fd = 1; const REAL *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ge_gamma<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ge_gamma<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ge_gamma<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
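Editor's note: kernel launches are asynchronous, and the timed loop above takes its end stamp without synchronizing, so the std::chrono measurement largely reflects host-side launch overhead unless the launch queue fills. A sketch of timing the same 1000 launches with CUDA events instead, reusing the launch configuration and arguments prepared by the harness; this is an alternative, not what the harness does.

// Sketch: event-based timing of the same loop. gridBlock, threadBlock and the
// ge_gamma arguments are the ones set up in the harness above.
cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);

cudaEventRecord(start_ev);
for (int i = 0; i < 1000; ++i)
    ge_gamma<<<gridBlock, threadBlock>>>(sd, fd, a, offset_a, ld_a, b, offset_b, ld_b);
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);       // wait until the last kernel has finished

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, start_ev, stop_ev);
cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);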
faf53d2a90a27f5b48cf10fd8f1befb6eb28a39c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <nvfunctional> #include <cstdio> #include <cassert> #include <chrono> #include <ftk/numeric/inverse_linear_interpolation_solver.hh> #include <ftk/numeric/linear_interpolation.hh> #include <ftk/numeric/clamp.hh> #include <ftk/numeric/symmetric_matrix.hh> #include <ftk/numeric/fixed_point.hh> #include <ftk/numeric/critical_point_type.hh> #include <ftk/numeric/critical_point_test.hh> #include <ftk/mesh/lattice.hh> // #include <ftk/filters/critical_point_lite.hh> #include "common_hip.cuh" template <int scope> __device__ bool check_simplex_cp3t( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, // array dimension const element43_t& e, const double *V[2], // current and next timesteps const double *gradV[2], // jacobians const double *scalar[2], // scalars cp_t &cp) { typedef ftk::fixed_point<> fp_t; // const int last_timestep = current_timestep - 1; // if (scope == scope_interval && e.corner[3] != last_timestep) if (e.corner[3] != current_timestep) return false; int vertices[4][4], indices[4]; size_t local_indices[4]; for (int i = 0; i < 4; i ++) { for (int j = 0; j < 4; j ++) { vertices[i][j] = e.corner[j] + unit_simplex_offset_4_3<scope>(e.type, i, j); if (vertices[i][j] < domain.st[j] || vertices[i][j] > domain.st[j] + domain.sz[j] - 1) return false; } indices[i] = domain.to_index(vertices[i]); local_indices[i] = ext.to_index(vertices[i]); } double v[4][3]; fp_t vf[4][3]; for (int i = 0; i < 4; i ++) { const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]); for (int j = 0; j < 3; j ++) { v[i][j] = V[unit_simplex_offset_4_3<scope>(e.type, i, 3)][k*3+j]; // V has three channels vf[i][j] = v[i][j]; } } bool succ = robust_critical_point_in_simplex3(vf, indices); if (!succ) return false; double mu[4], cond; bool succ2 = ftk::inverse_lerp_s3v3(v, mu, &cond); //, 0.0); if (!succ2) ftk::clamp_barycentric<4>(mu); if (1) { // (succ2) { // if (!succ2) return false; // linear jacobian interpolation if (gradV[0]) { // have given jacobian double Js[4][3][3], J[3][3]; for (int i = 0; i < 4; i ++) { size_t ii = local_indices[i]; // ext.to_index(vertices[i]); int t = unit_simplex_offset_4_3<scope>(e.type, i, 3); for (int j = 0; j < 3; j ++) for (int k = 0; k < 3; k ++) Js[i][j][k] = gradV[t][ii*9 + j*3 + k]; } ftk::lerp_s3m3x3(Js, mu, J); ftk::make_symmetric3x3(J); cp.type = ftk::critical_point_type_3d(J, true/*symmetric*/); } // scalar interpolation if (scalar[0]) { // have given scalar double values[4]; for (int i = 0; i < 4; i ++) { size_t ii = local_indices[i]; // ext.to_index(vertices[i]); int t = unit_simplex_offset_4_3<scope>(e.type, i, 3); values[i] = scalar[t][ii]; } cp.scalar[0] = ftk::lerp_s3(values, mu); } double X[4][4], x[4]; for (int i = 0; i < 4; i ++) for (int j = 0; j < 4; j ++) X[i][j] = vertices[i][j]; ftk::lerp_s3v4(X, mu, x); cp.x[0] = x[0]; cp.x[1] = x[1]; cp.x[2] = x[2]; cp.t = x[3]; cp.cond = cond; return true; } else return false; } template <int scope> __global__ void sweep_simplices( int current_timestep, const lattice4_t domain, const lattice4_t core, const lattice3_t ext, // array dimension const double *Vc, // current timestep const double *Vn, // next timestep const double *Jc, const double *Jn, const double *Sc, const double *Sn, unsigned long long &ncps, cp_t *cps) { const double *V[2] = {Vc, Vn}; const double *J[2] = {Jc, Jn}; const double *S[2] = {Sc, Sn}; int tid = getGlobalIdx_3D_1D(); const element43_t e = 
element43_from_index<scope>(core, tid); cp_t cp; bool succ = check_simplex_cp3t<scope>( current_timestep, domain, core, ext, e, V, J, S, cp); if (succ) { unsigned long long i = atomicAdd(&ncps, 1ul); cp.tag = tid; cps[i] = cp; } } template <int scope> static std::vector<cp_t> extract_cp3dt( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, const double *Vc, const double *Vn, const double *Jc, const double *Jn, const double *Sc, const double *Sn) { auto t0 = std::chrono::high_resolution_clock::now(); const size_t ntasks = core.n() * ntypes_4_3<scope>(); // fprintf(stderr, "ntasks=%zu\n", ntasks); const int maxGridDim = 1024; const int blockSize = 256; const int nBlocks = idivup(ntasks, blockSize); dim3 gridSize; if (nBlocks >= maxGridDim) gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim); else gridSize = dim3(nBlocks); double *dVc = NULL, *dVn = NULL; if (Vc) { hipMalloc((void**)&dVc, 3 * sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dVc"); hipMemcpy(dVc, Vc, 3 * sizeof(double) * ext.n(), hipMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying dVc"); } if (Vn) { hipMalloc((void**)&dVn, 3 * sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dVl"); hipMemcpy(dVn, Vn, 3 * sizeof(double) * ext.n(), hipMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying dVl"); } double *dJc = NULL, *dJn = NULL; if (Jc) { hipMalloc((void**)&dJc, 9 * sizeof(double) * ext.n()); hipMemcpy(dJc, Jc, 9 * sizeof(double) * ext.n(), hipMemcpyHostToDevice); } if (Jn) { hipMalloc((void**)&dJn, 9 * sizeof(double) * ext.n()); hipMemcpy(dJn, Jn, 9 * sizeof(double) * ext.n(), hipMemcpyHostToDevice); } double *dSc = NULL, *dSn = NULL; if (Sc) { hipMalloc((void**)&dSc, sizeof(double) * ext.n()); hipMemcpy(dSc, Sc, sizeof(double) * ext.n(), hipMemcpyHostToDevice); } if (Sn) { hipMalloc((void**)&dSn, sizeof(double) * ext.n()); hipMemcpy(dSn, Sn, sizeof(double) * ext.n(), hipMemcpyHostToDevice); } unsigned long long *dncps; // number of cps hipMalloc((void**)&dncps, sizeof(unsigned long long)); hipMemset(dncps, 0, sizeof(unsigned long long)); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps"); cp_t *dcps; hipMalloc((void**)&dcps, sizeof(cp_t) * core.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps"); hipDeviceSynchronize(); fprintf(stderr, "calling kernel func...\n"); hipLaunchKernelGGL(( sweep_simplices<scope>), dim3(gridSize), dim3(blockSize), 0, 0, current_timestep, domain, core, ext, dVc, dVn, dJc, dJn, dSc, dSn, *dncps, dcps); hipDeviceSynchronize(); checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function"); unsigned long long ncps = 0; hipMemcpy(&ncps, dncps, sizeof(unsigned long long), hipMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost, dncps"); fprintf(stderr, "ncps=%llu\n", ncps); std::vector<cp_t> cps(ncps); hipMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, hipMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipMemcpyDeviceToHost"); if (dVc) hipFree(dVc); if (dVn) hipFree(dVn); if (dJc) hipFree(dJc); if (dJn) hipFree(dJn); if (dSc) hipFree(dSc); if (dSn) hipFree(dSn); hipFree(dncps); hipFree(dcps); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: hipFree"); hipDeviceSynchronize(); auto t1 = std::chrono::high_resolution_clock::now(); float duration = 
std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9; fprintf(stderr, "exitting gpu kernel, ncps=%llu, time=%f\n", ncps, duration); return cps; } std::vector<cp_t> extract_cp3dt_cuda( int scope, int current_timestep, const ftk::lattice& domain, const ftk::lattice& core, const ftk::lattice& ext, const double *Vc, const double *Vl, const double *Jc, const double *Jl, const double *Sc, const double *Sl) { lattice4_t D(domain); lattice4_t C(core); lattice3_t E(ext); if (scope == scope_interval) return extract_cp3dt<scope_interval>(current_timestep, D, C, E, Vc, Vl, Jc, Jl, Sc, Sl); if (scope == scope_ordinal) return extract_cp3dt<scope_ordinal>(current_timestep, D, C, E, Vc, Vl, Jc, Jl, Sc, Sl); else // scope == 2 return extract_cp3dt<scope_all>(current_timestep, D, C, E, Vc, Vl, Jc, Jl, Sc, Sl); }
faf53d2a90a27f5b48cf10fd8f1befb6eb28a39c.cu
#include <nvfunctional> #include <cstdio> #include <cassert> #include <chrono> #include <ftk/numeric/inverse_linear_interpolation_solver.hh> #include <ftk/numeric/linear_interpolation.hh> #include <ftk/numeric/clamp.hh> #include <ftk/numeric/symmetric_matrix.hh> #include <ftk/numeric/fixed_point.hh> #include <ftk/numeric/critical_point_type.hh> #include <ftk/numeric/critical_point_test.hh> #include <ftk/mesh/lattice.hh> // #include <ftk/filters/critical_point_lite.hh> #include "common.cuh" template <int scope> __device__ bool check_simplex_cp3t( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, // array dimension const element43_t& e, const double *V[2], // current and next timesteps const double *gradV[2], // jacobians const double *scalar[2], // scalars cp_t &cp) { typedef ftk::fixed_point<> fp_t; // const int last_timestep = current_timestep - 1; // if (scope == scope_interval && e.corner[3] != last_timestep) if (e.corner[3] != current_timestep) return false; int vertices[4][4], indices[4]; size_t local_indices[4]; for (int i = 0; i < 4; i ++) { for (int j = 0; j < 4; j ++) { vertices[i][j] = e.corner[j] + unit_simplex_offset_4_3<scope>(e.type, i, j); if (vertices[i][j] < domain.st[j] || vertices[i][j] > domain.st[j] + domain.sz[j] - 1) return false; } indices[i] = domain.to_index(vertices[i]); local_indices[i] = ext.to_index(vertices[i]); } double v[4][3]; fp_t vf[4][3]; for (int i = 0; i < 4; i ++) { const size_t k = local_indices[i]; // k = ext.to_index(vertices[i]); for (int j = 0; j < 3; j ++) { v[i][j] = V[unit_simplex_offset_4_3<scope>(e.type, i, 3)][k*3+j]; // V has three channels vf[i][j] = v[i][j]; } } bool succ = robust_critical_point_in_simplex3(vf, indices); if (!succ) return false; double mu[4], cond; bool succ2 = ftk::inverse_lerp_s3v3(v, mu, &cond); //, 0.0); if (!succ2) ftk::clamp_barycentric<4>(mu); if (1) { // (succ2) { // if (!succ2) return false; // linear jacobian interpolation if (gradV[0]) { // have given jacobian double Js[4][3][3], J[3][3]; for (int i = 0; i < 4; i ++) { size_t ii = local_indices[i]; // ext.to_index(vertices[i]); int t = unit_simplex_offset_4_3<scope>(e.type, i, 3); for (int j = 0; j < 3; j ++) for (int k = 0; k < 3; k ++) Js[i][j][k] = gradV[t][ii*9 + j*3 + k]; } ftk::lerp_s3m3x3(Js, mu, J); ftk::make_symmetric3x3(J); cp.type = ftk::critical_point_type_3d(J, true/*symmetric*/); } // scalar interpolation if (scalar[0]) { // have given scalar double values[4]; for (int i = 0; i < 4; i ++) { size_t ii = local_indices[i]; // ext.to_index(vertices[i]); int t = unit_simplex_offset_4_3<scope>(e.type, i, 3); values[i] = scalar[t][ii]; } cp.scalar[0] = ftk::lerp_s3(values, mu); } double X[4][4], x[4]; for (int i = 0; i < 4; i ++) for (int j = 0; j < 4; j ++) X[i][j] = vertices[i][j]; ftk::lerp_s3v4(X, mu, x); cp.x[0] = x[0]; cp.x[1] = x[1]; cp.x[2] = x[2]; cp.t = x[3]; cp.cond = cond; return true; } else return false; } template <int scope> __global__ void sweep_simplices( int current_timestep, const lattice4_t domain, const lattice4_t core, const lattice3_t ext, // array dimension const double *Vc, // current timestep const double *Vn, // next timestep const double *Jc, const double *Jn, const double *Sc, const double *Sn, unsigned long long &ncps, cp_t *cps) { const double *V[2] = {Vc, Vn}; const double *J[2] = {Jc, Jn}; const double *S[2] = {Sc, Sn}; int tid = getGlobalIdx_3D_1D(); const element43_t e = element43_from_index<scope>(core, tid); cp_t cp; bool succ = check_simplex_cp3t<scope>( 
current_timestep, domain, core, ext, e, V, J, S, cp); if (succ) { unsigned long long i = atomicAdd(&ncps, 1ul); cp.tag = tid; cps[i] = cp; } } template <int scope> static std::vector<cp_t> extract_cp3dt( int current_timestep, const lattice4_t& domain, const lattice4_t& core, const lattice3_t& ext, const double *Vc, const double *Vn, const double *Jc, const double *Jn, const double *Sc, const double *Sn) { auto t0 = std::chrono::high_resolution_clock::now(); const size_t ntasks = core.n() * ntypes_4_3<scope>(); // fprintf(stderr, "ntasks=%zu\n", ntasks); const int maxGridDim = 1024; const int blockSize = 256; const int nBlocks = idivup(ntasks, blockSize); dim3 gridSize; if (nBlocks >= maxGridDim) gridSize = dim3(idivup(nBlocks, maxGridDim), maxGridDim); else gridSize = dim3(nBlocks); double *dVc = NULL, *dVn = NULL; if (Vc) { cudaMalloc((void**)&dVc, 3 * sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dVc"); cudaMemcpy(dVc, Vc, 3 * sizeof(double) * ext.n(), cudaMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying dVc"); } if (Vn) { cudaMalloc((void**)&dVn, 3 * sizeof(double) * ext.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dVl"); cudaMemcpy(dVn, Vn, 3 * sizeof(double) * ext.n(), cudaMemcpyHostToDevice); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: copying dVl"); } double *dJc = NULL, *dJn = NULL; if (Jc) { cudaMalloc((void**)&dJc, 9 * sizeof(double) * ext.n()); cudaMemcpy(dJc, Jc, 9 * sizeof(double) * ext.n(), cudaMemcpyHostToDevice); } if (Jn) { cudaMalloc((void**)&dJn, 9 * sizeof(double) * ext.n()); cudaMemcpy(dJn, Jn, 9 * sizeof(double) * ext.n(), cudaMemcpyHostToDevice); } double *dSc = NULL, *dSn = NULL; if (Sc) { cudaMalloc((void**)&dSc, sizeof(double) * ext.n()); cudaMemcpy(dSc, Sc, sizeof(double) * ext.n(), cudaMemcpyHostToDevice); } if (Sn) { cudaMalloc((void**)&dSn, sizeof(double) * ext.n()); cudaMemcpy(dSn, Sn, sizeof(double) * ext.n(), cudaMemcpyHostToDevice); } unsigned long long *dncps; // number of cps cudaMalloc((void**)&dncps, sizeof(unsigned long long)); cudaMemset(dncps, 0, sizeof(unsigned long long)); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dncps"); cp_t *dcps; cudaMalloc((void**)&dcps, sizeof(cp_t) * core.n()); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: allocating dcps"); cudaDeviceSynchronize(); fprintf(stderr, "calling kernel func...\n"); sweep_simplices<scope><<<gridSize, blockSize>>>( current_timestep, domain, core, ext, dVc, dVn, dJc, dJn, dSc, dSn, *dncps, dcps); cudaDeviceSynchronize(); checkLastCudaError("[FTK-CUDA] error: sweep_simplices, kernel function"); unsigned long long ncps = 0; cudaMemcpy(&ncps, dncps, sizeof(unsigned long long), cudaMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost, dncps"); fprintf(stderr, "ncps=%llu\n", ncps); std::vector<cp_t> cps(ncps); cudaMemcpy(cps.data(), dcps, sizeof(cp_t) * ncps, cudaMemcpyDeviceToHost); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaMemcpyDeviceToHost"); if (dVc) cudaFree(dVc); if (dVn) cudaFree(dVn); if (dJc) cudaFree(dJc); if (dJn) cudaFree(dJn); if (dSc) cudaFree(dSc); if (dSn) cudaFree(dSn); cudaFree(dncps); cudaFree(dcps); checkLastCudaError("[FTK-CUDA] error: sweep_simplices: cudaFree"); cudaDeviceSynchronize(); auto t1 = std::chrono::high_resolution_clock::now(); float duration = std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count() * 1e-9; fprintf(stderr, "exitting gpu 
kernel, ncps=%llu, time=%f\n", ncps, duration); return cps; } std::vector<cp_t> extract_cp3dt_cuda( int scope, int current_timestep, const ftk::lattice& domain, const ftk::lattice& core, const ftk::lattice& ext, const double *Vc, const double *Vl, const double *Jc, const double *Jl, const double *Sc, const double *Sl) { lattice4_t D(domain); lattice4_t C(core); lattice3_t E(ext); if (scope == scope_interval) return extract_cp3dt<scope_interval>(current_timestep, D, C, E, Vc, Vl, Jc, Jl, Sc, Sl); if (scope == scope_ordinal) return extract_cp3dt<scope_ordinal>(current_timestep, D, C, E, Vc, Vl, Jc, Jl, Sc, Sl); else // scope == 2 return extract_cp3dt<scope_all>(current_timestep, D, C, E, Vc, Vl, Jc, Jl, Sc, Sl); }
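Editor's note: sweep_simplices emits a variable number of critical points per launch by letting each successful thread reserve an output slot with atomicAdd on a device-side counter. A stripped-down sketch of that compaction pattern follows; the names are hypothetical, the predicate is a stand-in for the critical-point test, and unlike this sketch the real kernel receives the counter as a reference parameter.

// Minimal illustration (not FTK code) of the atomicAdd output-compaction
// pattern used by sweep_simplices.
struct result_t { unsigned long long tag; };

__global__ void compact_results(int n, unsigned long long* d_count, result_t* d_out)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;

    bool found = (tid % 7 == 0);                              // stand-in predicate
    if (found) {
        unsigned long long slot = atomicAdd(d_count, 1ull);   // reserve an output slot
        d_out[slot].tag = tid;
    }
}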
57d3a1b2c0ced8e71fca21b7bf73c540c547804d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _CT_CUDA_CU_ * * Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC * All Rights Reserved * [email protected] * * See LICENSE in the top level directory for licensing details */ #include "CT_CUDA.cuh" #ifdef _CT_CUDA_CUH_ CT_CUDA::CT_CUDA(CTBaseImpl::CTBenchType B, CTBaseImpl::CTAtomType A) : CTBaseImpl("CUDA", B, A), Array(nullptr), Idx(nullptr), d_Array(nullptr), d_Idx(nullptr), memSize(0), iters(0), elems(0), stride(0), threadBlocks(0), threadsPerBlock(0) {} CT_CUDA::~CT_CUDA() {} bool CT_CUDA::PrintCUDADeviceProperties() { int device, deviceCount; hipDeviceProp_t properties; std::cout << "------------------------------------------------" << std::endl; std::cout << " CUDA Device Properties" << std::endl; std::cout << "------------------------------------------------" << std::endl; // Get and print the number of CUDA devices on the platform if(hipGetDeviceCount(&deviceCount) != hipSuccess){ std::cout << "CT_CUDA::PrintCUDADeviceProperties: hipGetDeviceCount failed!" << std::endl; return false; } else{ std::cout << "Number of CUDA enabled devices detected: " << deviceCount << std::endl; } // Get the target device if(hipGetDevice(&device) != hipSuccess){ std::cout << "CT_CUDA::PrintCUDADeviceProperties: hipGetDevice failed!" << std::endl; return false; } // Get the target device properties if(hipGetDeviceProperties(&properties, device) != hipSuccess){ std::cout << "CT_CUDA::PrintCUDADeviceProperties: hipGetDeviceProperties failed!" << std::endl; return false; } // Print out the target device details std::cout << "Target Device Details:" << std::endl; std::cout << "Device Name: " << properties.name << std::endl; std::cout << "Global Memory (bytes): " << properties.totalGlobalMem << std::endl; std::cout << "Compute Capability: " << properties.major << "." 
<< properties.minor << std::endl; return true; } bool CT_CUDA::AllocateData(uint64_t m, uint64_t b, uint64_t t, uint64_t i, uint64_t s) { // save the data memSize = m; threadBlocks = b; threadsPerBlock = t; iters = i; stride = s; // check args if ( threadBlocks <= 0 ) { std::cout << "CT_CUDA::AllocateData: threadBlocks must be greater than 0" << std::endl; return false; } if ( threadsPerBlock <= 0 ) { std::cout << "CT_CUDA::AllocateData: threadsPerBlock must be greater than 0" << std::endl; return false; } if ( iters == 0 ) { std::cout << "CT_CUDA::AllocateData: `iters` cannot be 0" << std::endl; return false; } if ( stride == 0 ) { std::cout << "CT_CUDA::AllocateData: `stride` cannot be 0" << std::endl; return false; } // calculate the number of elements elems = (memSize/8); uint64_t idxMemSize = (sizeof(uint64_t) * (threadBlocks + 1) * iters); uint64_t idxElems = (idxMemSize/8); // test to see whether we'll stride out of bounds uint64_t end = (threadBlocks * iters * stride) - stride; if ( end >= elems ) { std::cout << "CT_CUDA::AllocateData : `Array` is not large enough for threadBlocks=" << threadBlocks << "; iters=" << iters << "; stride=" << stride << std::endl; return false; } // Allocate arrays on the host Array = (uint64_t *) malloc(memSize); if ( Array == nullptr ) { std::cout << "CT_CUDA::AllocateData : 'Array' could not be allocated" << std::endl; return false; } Idx = (uint64_t *) malloc(idxMemSize); if ( Idx == nullptr ) { std::cout << "CT_CUDA::AllocateData : 'Idx' could not be allocated" << std::endl; if(Array){ free(Array); } return false; } // allocate data on the target device if ( hipMalloc(&d_Array, memSize) != hipSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Array' could not be allocated on device" << std::endl; hipFree(d_Array); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } if ( hipMalloc(&d_Idx, idxMemSize) != hipSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Idx' could not be alloced on device" << std::endl; hipFree(d_Array); hipFree(d_Idx); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } // Randomize the arrays on the host srand(time(NULL)); if ( this->GetBenchType() == CT_PTRCHASE ) { for ( unsigned i = 0; i < idxElems; i++ ) { Idx[i] = (uint64_t)(rand()%(idxElems - 1)); } } else { for ( unsigned i = 0; i < idxElems; i++ ) { Idx[i] = (uint64_t)(rand()%(elems - 1)); } } for ( unsigned i=0; i<elems; i++ ) { Array[i] = (uint64_t)(rand()); } // copy arrays from host to target device if ( hipMemcpy(d_Array, Array, memSize, hipMemcpyHostToDevice) != hipSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Array' could not be copied to device" << std::endl; hipFree(d_Array); hipFree(d_Idx); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } if ( hipMemcpy(d_Idx, Idx, idxMemSize, hipMemcpyHostToDevice) != hipSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Idx' could not be copied to device" << std::endl; hipFree(d_Array); hipFree(d_Idx); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } return true; } bool CT_CUDA::Execute(double &Timing, double &GAMS) { CTBaseImpl::CTBenchType BType = this->GetBenchType(); // benchmark type CTBaseImpl::CTAtomType AType = this->GetAtomType(); // atomic type double StartTime = 0.; // start time double EndTime = 0.; // end time double OPS = 0.; // billions of operations // determine benchmark type and launch the desired kernel if ( BType == CT_RAND ) { switch ( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( 
RAND_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: StartTime = this->MySecond(); hipLaunchKernelGGL(( RAND_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_STRIDE1 ) { switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( STRIDE1_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( STRIDE1_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_STRIDEN ) { switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( STRIDEN_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, iters, stride ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( STRIDEN_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, iters, stride ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_PTRCHASE ) { /* PTRCHASE kernels use only a single thread per block and, * * as such, threadsPerBlock as specified by the user is ignored. 
*/ switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( PTRCHASE_ADD), dim3(threadBlocks), dim3(1) , 0, 0, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( PTRCHASE_CAS), dim3(threadBlocks), dim3(1) , 0, 0, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_SG ) { switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( SG_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(4, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( SG_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(4, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_CENTRAL ) { switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( CENTRAL_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( CENTRAL_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_SCATTER ) { switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( SCATTER_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( SCATTER_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_GATHER ) { switch( AType ) { case CT_ADD: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( GATHER_ADD), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; case CT_CAS: hipDeviceSynchronize(); StartTime = this->MySecond(); hipLaunchKernelGGL(( GATHER_CAS), dim3(threadBlocks), dim3(threadsPerBlock) , 0, 0, d_Array, d_Idx, iters ); hipDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else { this->ReportBenchError(); return false; } Timing = this->Runtime(StartTime,EndTime); GAMS = OPS/Timing; return true; } bool CT_CUDA::FreeData() { if ( Array ) { free(Array); } if ( Idx ) { free(Idx); } if ( d_Array ) { hipFree(d_Array); } if ( d_Idx ) { hipFree(d_Idx); } return true; } #endif // _CT_CUDA_CUH_ // EOF
57d3a1b2c0ced8e71fca21b7bf73c540c547804d.cu
/* * _CT_CUDA_CU_ * * Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC * All Rights Reserved * [email protected] * * See LICENSE in the top level directory for licensing details */ #include "CT_CUDA.cuh" #ifdef _CT_CUDA_CUH_ CT_CUDA::CT_CUDA(CTBaseImpl::CTBenchType B, CTBaseImpl::CTAtomType A) : CTBaseImpl("CUDA", B, A), Array(nullptr), Idx(nullptr), d_Array(nullptr), d_Idx(nullptr), memSize(0), iters(0), elems(0), stride(0), threadBlocks(0), threadsPerBlock(0) {} CT_CUDA::~CT_CUDA() {} bool CT_CUDA::PrintCUDADeviceProperties() { int device, deviceCount; cudaDeviceProp properties; std::cout << "------------------------------------------------" << std::endl; std::cout << " CUDA Device Properties" << std::endl; std::cout << "------------------------------------------------" << std::endl; // Get and print the number of CUDA devices on the platform if(cudaGetDeviceCount(&deviceCount) != cudaSuccess){ std::cout << "CT_CUDA::PrintCUDADeviceProperties: cudaGetDeviceCount failed!" << std::endl; return false; } else{ std::cout << "Number of CUDA enabled devices detected: " << deviceCount << std::endl; } // Get the target device if(cudaGetDevice(&device) != cudaSuccess){ std::cout << "CT_CUDA::PrintCUDADeviceProperties: cudaGetDevice failed!" << std::endl; return false; } // Get the target device properties if(cudaGetDeviceProperties(&properties, device) != cudaSuccess){ std::cout << "CT_CUDA::PrintCUDADeviceProperties: cudaGetDeviceProperties failed!" << std::endl; return false; } // Print out the target device details std::cout << "Target Device Details:" << std::endl; std::cout << "Device Name: " << properties.name << std::endl; std::cout << "Global Memory (bytes): " << properties.totalGlobalMem << std::endl; std::cout << "Compute Capability: " << properties.major << "." 
<< properties.minor << std::endl; return true; } bool CT_CUDA::AllocateData(uint64_t m, uint64_t b, uint64_t t, uint64_t i, uint64_t s) { // save the data memSize = m; threadBlocks = b; threadsPerBlock = t; iters = i; stride = s; // check args if ( threadBlocks <= 0 ) { std::cout << "CT_CUDA::AllocateData: threadBlocks must be greater than 0" << std::endl; return false; } if ( threadsPerBlock <= 0 ) { std::cout << "CT_CUDA::AllocateData: threadsPerBlock must be greater than 0" << std::endl; return false; } if ( iters == 0 ) { std::cout << "CT_CUDA::AllocateData: `iters` cannot be 0" << std::endl; return false; } if ( stride == 0 ) { std::cout << "CT_CUDA::AllocateData: `stride` cannot be 0" << std::endl; return false; } // calculate the number of elements elems = (memSize/8); uint64_t idxMemSize = (sizeof(uint64_t) * (threadBlocks + 1) * iters); uint64_t idxElems = (idxMemSize/8); // test to see whether we'll stride out of bounds uint64_t end = (threadBlocks * iters * stride) - stride; if ( end >= elems ) { std::cout << "CT_CUDA::AllocateData : `Array` is not large enough for threadBlocks=" << threadBlocks << "; iters=" << iters << "; stride=" << stride << std::endl; return false; } // Allocate arrays on the host Array = (uint64_t *) malloc(memSize); if ( Array == nullptr ) { std::cout << "CT_CUDA::AllocateData : 'Array' could not be allocated" << std::endl; return false; } Idx = (uint64_t *) malloc(idxMemSize); if ( Idx == nullptr ) { std::cout << "CT_CUDA::AllocateData : 'Idx' could not be allocated" << std::endl; if(Array){ free(Array); } return false; } // allocate data on the target device if ( cudaMalloc(&d_Array, memSize) != cudaSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Array' could not be allocated on device" << std::endl; cudaFree(d_Array); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } if ( cudaMalloc(&d_Idx, idxMemSize) != cudaSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Idx' could not be alloced on device" << std::endl; cudaFree(d_Array); cudaFree(d_Idx); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } // Randomize the arrays on the host srand(time(NULL)); if ( this->GetBenchType() == CT_PTRCHASE ) { for ( unsigned i = 0; i < idxElems; i++ ) { Idx[i] = (uint64_t)(rand()%(idxElems - 1)); } } else { for ( unsigned i = 0; i < idxElems; i++ ) { Idx[i] = (uint64_t)(rand()%(elems - 1)); } } for ( unsigned i=0; i<elems; i++ ) { Array[i] = (uint64_t)(rand()); } // copy arrays from host to target device if ( cudaMemcpy(d_Array, Array, memSize, cudaMemcpyHostToDevice) != cudaSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Array' could not be copied to device" << std::endl; cudaFree(d_Array); cudaFree(d_Idx); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } if ( cudaMemcpy(d_Idx, Idx, idxMemSize, cudaMemcpyHostToDevice) != cudaSuccess ) { std::cout << "CT_CUDA::AllocateData : 'd_Idx' could not be copied to device" << std::endl; cudaFree(d_Array); cudaFree(d_Idx); if(Array){ free(Array); } if(Idx){ free(Idx); } return false; } return true; } bool CT_CUDA::Execute(double &Timing, double &GAMS) { CTBaseImpl::CTBenchType BType = this->GetBenchType(); // benchmark type CTBaseImpl::CTAtomType AType = this->GetAtomType(); // atomic type double StartTime = 0.; // start time double EndTime = 0.; // end time double OPS = 0.; // billions of operations // determine benchmark type and launch the desired kernel if ( BType == CT_RAND ) { switch ( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); 
RAND_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: StartTime = this->MySecond(); RAND_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_STRIDE1 ) { switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); STRIDE1_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); STRIDE1_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_STRIDEN ) { switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); STRIDEN_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, iters, stride ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); STRIDEN_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, iters, stride ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_PTRCHASE ) { /* PTRCHASE kernels use only a single thread per block and, * * as such, threadsPerBlock as specified by the user is ignored. 
*/ switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); PTRCHASE_ADD<<< threadBlocks, 1 >>>( d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); PTRCHASE_CAS<<< threadBlocks, 1 >>>( d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_SG ) { switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); SG_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(4, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); SG_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(4, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_CENTRAL ) { switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); CENTRAL_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); CENTRAL_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(1, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_SCATTER ) { switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); SCATTER_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); SCATTER_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else if ( BType == CT_GATHER ) { switch( AType ) { case CT_ADD: cudaDeviceSynchronize(); StartTime = this->MySecond(); GATHER_ADD<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; case CT_CAS: cudaDeviceSynchronize(); StartTime = this->MySecond(); GATHER_CAS<<< threadBlocks, threadsPerBlock >>>( d_Array, d_Idx, iters ); cudaDeviceSynchronize(); EndTime = this->MySecond(); OPS = this->GAM(3, iters, threadBlocks); break; default: this->ReportBenchError(); return false; break; } } else { this->ReportBenchError(); return false; } Timing = this->Runtime(StartTime,EndTime); GAMS = OPS/Timing; return true; } bool CT_CUDA::FreeData() { if ( Array ) { free(Array); } if ( Idx ) { free(Idx); } if ( d_Array ) { cudaFree(d_Array); } if ( d_Idx ) { cudaFree(d_Idx); } return true; } #endif // _CT_CUDA_CUH_ // EOF
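Editor's note: every branch of Execute repeats the same synchronize / stamp / launch / synchronize / stamp sequence; the CT_RAND CT_CAS case is the only one that takes its start stamp without a preceding device synchronize. A hypothetical refactoring sketch that factors the sequence into one helper; the kernels themselves are declared in CT_CUDA.cuh and are not reproduced here, and std::chrono is used in place of the class's MySecond() member.

// Sketch only: wrap one launch in the timing pattern used throughout Execute().
// The callable captures the launch, e.g.
//   [&]{ RAND_ADD<<<threadBlocks, threadsPerBlock>>>(d_Array, d_Idx, iters); }
#include <chrono>

template <typename Launch>
static double TimedLaunch(Launch launch)
{
    using clock = std::chrono::steady_clock;
    cudaDeviceSynchronize();                 // drain previously queued work
    auto start = clock::now();
    launch();
    cudaDeviceSynchronize();                 // wait for the timed kernel to finish
    return std::chrono::duration<double>(clock::now() - start).count();
}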
70c5fb84951403e39980a157dc8d996c81cb2431.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017-2020 by Contributors */ #include <memory> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../data/ellpack_page.cuh" #include "../data/device_adapter.cuh" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); struct SparsePageView { common::Span<const Entry> d_data; common::Span<const bst_row_t> d_row_ptr; XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data, common::Span<const bst_row_t> row_ptr) : d_data{data}, d_row_ptr{row_ptr} {} }; struct SparsePageLoader { bool use_shared; common::Span<const bst_row_t> d_row_ptr; common::Span<const Entry> d_data; bst_feature_t num_features; float* smem; size_t entry_start; __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(data.d_row_ptr), d_data(data.d_data), num_features(num_features), entry_start(entry_start) { extern __shared__ float _smem[]; smem = _smem; // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) const { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } }; struct EllpackLoader { EllpackDeviceAccessor const& matrix; XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : matrix{m} {} __device__ __forceinline__ float GetFvalue(int ridx, int fidx) const { auto gidx = matrix.GetBinIndex(ridx, fidx); if (gidx == -1) { return nan(""); } // The gradient index needs to be shifted by one as min values are not included in the // cuts. 
if (gidx == matrix.feature_segments[fidx]) { return matrix.min_fvalue[fidx]; } return matrix.gidx_fvalue_map[gidx - 1]; } }; template <typename Batch> struct DeviceAdapterLoader { Batch batch; bst_feature_t columns; float* smem; bool use_shared; using BatchT = Batch; DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { auto beg = global_idx * columns; auto end = (global_idx + 1) * columns; for (size_t i = beg; i < end; ++i) { smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value; } } } __syncthreads(); } DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetElement(ridx * columns + fidx).value; } }; template <typename Loader> __device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree, Loader* loader) { RegTree::Node n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.SplitIndex()); // Missing value if (isnan(fvalue)) { n = tree[n.DefaultChild()]; } else { if (fvalue < n.SplitCond()) { n = tree[n.LeftChild()]; } else { n = tree[n.RightChild()]; } } } return n.LeafValue(); } template <typename Loader, typename Data> __global__ void PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; Loader loader(data, use_shared, num_features, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; float leaf = GetLeafWeight(global_idx, d_tree, &loader); sum += leaf; } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class DeviceModel { public: dh::device_vector<RegTree::Node> nodes; dh::device_vector<size_t> tree_segments; dh::device_vector<int> tree_group; size_t tree_beg_; // NOLINT size_t tree_end_; // NOLINT int num_group; void CopyModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<RegTree::Node>& h_nodes, size_t tree_begin, size_t tree_end) { nodes.resize(h_nodes.size()); dh::safe_cuda(hipMemcpyAsync(nodes.data().get(), h_nodes.data(), sizeof(RegTree::Node) * h_nodes.size(), hipMemcpyHostToDevice)); tree_segments.resize(h_tree_segments.size()); dh::safe_cuda(hipMemcpyAsync(tree_segments.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), hipMemcpyHostToDevice)); tree_group.resize(model.tree_info.size()); 
dh::safe_cuda(hipMemcpyAsync(tree_group.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), hipMemcpyHostToDevice)); this->tree_beg_ = tree_begin; this->tree_end_ = tree_end; this->num_group = model.learner_model_param->num_output_group; } void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) { dh::safe_cuda(hipSetDevice(gpu_id)); CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<RegTree::Node> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } CopyModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } }; class GPUPredictor : public xgboost::Predictor { private: void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { batch.offset.SetDevice(generic_param_->gpu_id); batch.data.SetDevice(generic_param_->gpu_id); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan()}; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<SparsePageLoader, SparsePageView>, data, dh::ToSpan(model_.nodes), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, num_features, num_rows, entry_start, use_shared, model_.num_group); } void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds, size_t batch_offset) { const uint32_t BLOCK_THREADS = 256; size_t num_rows = batch.n_rows; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); bool use_shared = false; size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} ( PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch, dh::ToSpan(model_.nodes), out_preds->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows, entry_start, use_shared, model_.num_group); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { dh::safe_cuda(hipSetDevice(generic_param_->gpu_id)); if (tree_end - tree_begin == 0) { return; } model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id); out_preds->SetDevice(generic_param_->gpu_id); if (dmat->PageExists<SparsePage>()) { size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { this->PredictInternal(batch, model.learner_model_param->num_feature, out_preds, batch_offset); batch_offset += batch.Size() * 
model.learner_model_param->num_output_group; } } else { size_t batch_offset = 0; for (auto const& page : dmat->GetBatches<EllpackPage>()) { this->PredictInternal( page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds, batch_offset); batch_offset += page.Impl()->n_rows; } } } public: explicit GPUPredictor(GenericParameter const* generic_param) : Predictor::Predictor{generic_param} {} ~GPUPredictor() override { if (generic_param_->gpu_id >= 0) { dh::safe_cuda(hipSetDevice(generic_param_->gpu_id)); } } void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { // This function is duplicated with CPU predictor PredictBatch, see comments in there. // FIXME(trivialfis): Remove the duplication. std::lock_guard<std::mutex> const guard(lock_); int device = generic_param_->gpu_id; CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data."; ConfigureDevice(device); CHECK_EQ(tree_begin, 0); auto* out_preds = &predts->predictions; CHECK_GE(predts->version, tree_begin); if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) { CHECK_EQ(predts->version, 0); } if (predts->version == 0) { this->InitOutPredictions(dmat->Info(), out_preds, model); } uint32_t const output_groups = model.learner_model_param->num_output_group; CHECK_NE(output_groups, 0); uint32_t real_ntree_limit = ntree_limit * output_groups; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups; if (predts->version > end_version) { CHECK_NE(ntree_limit, 0); this->InitOutPredictions(dmat->Info(), out_preds, model); predts->version = 0; } uint32_t const beg_version = predts->version; CHECK_LE(beg_version, end_version); if (beg_version < end_version) { this->DevicePredictInternal(dmat, out_preds, model, beg_version * output_groups, end_version * output_groups); } uint32_t delta = end_version - beg_version; CHECK_LE(delta, model.trees.size()); predts->Update(delta); CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ || out_preds->Size() == dmat->Info().num_row_); } template <typename Adapter, typename Loader> void DispatchedInplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, uint32_t tree_end) const { auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id); uint32_t const output_groups = model.learner_model_param->num_output_group; DeviceModel d_model; d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id); auto m = dmlc::get<std::shared_ptr<Adapter>>(x); CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature) << "Number of columns in data must equal to trained model."; CHECK_EQ(this->generic_param_->gpu_id, m->DeviceIdx()) << "XGBoost is running on device: " << this->generic_param_->gpu_id << ", " << "but data is on: " << m->DeviceIdx(); MetaInfo info; info.num_col_ = m->NumColumns(); info.num_row_ = m->NumRows(); this->InitOutPredictions(info, &(out_preds->predictions), model); const uint32_t BLOCK_THREADS = 128; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * m->NumColumns() * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes) { shared_memory_bytes = 0; use_shared = false; } 
size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<Loader, typename Loader::BatchT>, m->Value(), dh::ToSpan(d_model.nodes), out_preds->predictions.DeviceSpan(), dh::ToSpan(d_model.tree_segments), dh::ToSpan(d_model.tree_group), tree_begin, tree_end, m->NumColumns(), info.num_row_, entry_start, use_shared, output_groups); } void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, unsigned tree_end) const override { if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) { this->DispatchedInplacePredict< data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) { this->DispatchedInplacePredict< data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else { LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor."; } } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.learner_model_param->num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(generic_param_->gpu_id); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.learner_model_param->base_score); } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override { Predictor::Configure(cfg); } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device); } } std::mutex lock_; DeviceModel model_; size_t max_shared_memory_bytes_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([](GenericParameter const* generic_param) { return new GPUPredictor(generic_param); }); } // namespace predictor } // namespace xgboost
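The predictor file above maps one thread to one data row: the grid is sized with DivRoundUp(num_rows, BLOCK_THREADS), each thread walks every tree for its row, and the result is accumulated into the output span. Below is a minimal, self-contained CUDA sketch of that one-thread-per-row launch pattern; the toy_predict kernel, the dense row layout, and the simplified Node struct are illustrative assumptions (the missing-value/default-child handling of GetLeafWeight is omitted), not the XGBoost API.

// Sketch only: one thread per row, grid rounded up, as in PredictKernel above.
#include <cstdio>
#include <cuda_runtime.h>

struct Node { int feature; float split; int left; int right; float leaf; bool is_leaf; };

__global__ void toy_predict(const float* rows, int num_rows, int num_features,
                            const Node* nodes, float* out) {
  int ridx = blockIdx.x * blockDim.x + threadIdx.x;  // one thread per row
  if (ridx >= num_rows) return;                      // guard the tail block
  Node n = nodes[0];
  while (!n.is_leaf) {                               // walk one dense tree
    float v = rows[ridx * num_features + n.feature];
    n = nodes[v < n.split ? n.left : n.right];
  }
  out[ridx] += n.leaf;                               // accumulate, like d_out_predictions above
}

int main() {
  const int num_rows = 4, num_features = 2, threads = 128;
  float h_rows[num_rows * num_features] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
  Node h_nodes[3] = {{0, 3.f, 1, 2, 0.f, false},     // root: split feature 0 at 3.0
                     {0, 0.f, 0, 0, -1.f, true},     // left leaf
                     {0, 0.f, 0, 0, 1.f, true}};     // right leaf
  float *d_rows, *d_out; Node *d_nodes;
  cudaMalloc((void**)&d_rows, sizeof(h_rows));
  cudaMalloc((void**)&d_nodes, sizeof(h_nodes));
  cudaMalloc((void**)&d_out, num_rows * sizeof(float));
  cudaMemset(d_out, 0, num_rows * sizeof(float));
  cudaMemcpy(d_rows, h_rows, sizeof(h_rows), cudaMemcpyHostToDevice);
  cudaMemcpy(d_nodes, h_nodes, sizeof(h_nodes), cudaMemcpyHostToDevice);
  int grid = (num_rows + threads - 1) / threads;     // DivRoundUp, as for GRID_SIZE above
  toy_predict<<<grid, threads>>>(d_rows, num_rows, num_features, d_nodes, d_out);
  float h_out[num_rows];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < num_rows; ++i) printf("row %d -> %f\n", i, h_out[i]);
  cudaFree(d_rows); cudaFree(d_nodes); cudaFree(d_out);
  return 0;
}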
70c5fb84951403e39980a157dc8d996c81cb2431.cu
/*! * Copyright 2017-2020 by Contributors */ #include <memory> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../data/ellpack_page.cuh" #include "../data/device_adapter.cuh" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); struct SparsePageView { common::Span<const Entry> d_data; common::Span<const bst_row_t> d_row_ptr; XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data, common::Span<const bst_row_t> row_ptr) : d_data{data}, d_row_ptr{row_ptr} {} }; struct SparsePageLoader { bool use_shared; common::Span<const bst_row_t> d_row_ptr; common::Span<const Entry> d_data; bst_feature_t num_features; float* smem; size_t entry_start; __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(data.d_row_ptr), d_data(data.d_data), num_features(num_features), entry_start(entry_start) { extern __shared__ float _smem[]; smem = _smem; // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) const { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } }; struct EllpackLoader { EllpackDeviceAccessor const& matrix; XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : matrix{m} {} __device__ __forceinline__ float GetFvalue(int ridx, int fidx) const { auto gidx = matrix.GetBinIndex(ridx, fidx); if (gidx == -1) { return nan(""); } // The gradient index needs to be shifted by one as min values are not included in the // cuts. 
if (gidx == matrix.feature_segments[fidx]) { return matrix.min_fvalue[fidx]; } return matrix.gidx_fvalue_map[gidx - 1]; } }; template <typename Batch> struct DeviceAdapterLoader { Batch batch; bst_feature_t columns; float* smem; bool use_shared; using BatchT = Batch; DEV_INLINE DeviceAdapterLoader(Batch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { auto beg = global_idx * columns; auto end = (global_idx + 1) * columns; for (size_t i = beg; i < end; ++i) { smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value; } } } __syncthreads(); } DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetElement(ridx * columns + fidx).value; } }; template <typename Loader> __device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree, Loader* loader) { RegTree::Node n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.SplitIndex()); // Missing value if (isnan(fvalue)) { n = tree[n.DefaultChild()]; } else { if (fvalue < n.SplitCond()) { n = tree[n.LeftChild()]; } else { n = tree[n.RightChild()]; } } } return n.LeafValue(); } template <typename Loader, typename Data> __global__ void PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; Loader loader(data, use_shared, num_features, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; float leaf = GetLeafWeight(global_idx, d_tree, &loader); sum += leaf; } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class DeviceModel { public: dh::device_vector<RegTree::Node> nodes; dh::device_vector<size_t> tree_segments; dh::device_vector<int> tree_group; size_t tree_beg_; // NOLINT size_t tree_end_; // NOLINT int num_group; void CopyModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<RegTree::Node>& h_nodes, size_t tree_begin, size_t tree_end) { nodes.resize(h_nodes.size()); dh::safe_cuda(cudaMemcpyAsync(nodes.data().get(), h_nodes.data(), sizeof(RegTree::Node) * h_nodes.size(), cudaMemcpyHostToDevice)); tree_segments.resize(h_tree_segments.size()); dh::safe_cuda(cudaMemcpyAsync(tree_segments.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), cudaMemcpyHostToDevice)); tree_group.resize(model.tree_info.size()); 
dh::safe_cuda(cudaMemcpyAsync(tree_group.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), cudaMemcpyHostToDevice)); this->tree_beg_ = tree_begin; this->tree_end_ = tree_end; this->num_group = model.learner_model_param->num_output_group; } void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) { dh::safe_cuda(cudaSetDevice(gpu_id)); CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<RegTree::Node> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } CopyModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } }; class GPUPredictor : public xgboost::Predictor { private: void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { batch.offset.SetDevice(generic_param_->gpu_id); batch.data.SetDevice(generic_param_->gpu_id); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan()}; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<SparsePageLoader, SparsePageView>, data, dh::ToSpan(model_.nodes), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, num_features, num_rows, entry_start, use_shared, model_.num_group); } void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds, size_t batch_offset) { const uint32_t BLOCK_THREADS = 256; size_t num_rows = batch.n_rows; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); bool use_shared = false; size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} ( PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch, dh::ToSpan(model_.nodes), out_preds->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows, entry_start, use_shared, model_.num_group); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id)); if (tree_end - tree_begin == 0) { return; } model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id); out_preds->SetDevice(generic_param_->gpu_id); if (dmat->PageExists<SparsePage>()) { size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { this->PredictInternal(batch, model.learner_model_param->num_feature, out_preds, batch_offset); batch_offset += batch.Size() * 
model.learner_model_param->num_output_group; } } else { size_t batch_offset = 0; for (auto const& page : dmat->GetBatches<EllpackPage>()) { this->PredictInternal( page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds, batch_offset); batch_offset += page.Impl()->n_rows; } } } public: explicit GPUPredictor(GenericParameter const* generic_param) : Predictor::Predictor{generic_param} {} ~GPUPredictor() override { if (generic_param_->gpu_id >= 0) { dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id)); } } void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { // This function is duplicated with CPU predictor PredictBatch, see comments in there. // FIXME(trivialfis): Remove the duplication. std::lock_guard<std::mutex> const guard(lock_); int device = generic_param_->gpu_id; CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data."; ConfigureDevice(device); CHECK_EQ(tree_begin, 0); auto* out_preds = &predts->predictions; CHECK_GE(predts->version, tree_begin); if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) { CHECK_EQ(predts->version, 0); } if (predts->version == 0) { this->InitOutPredictions(dmat->Info(), out_preds, model); } uint32_t const output_groups = model.learner_model_param->num_output_group; CHECK_NE(output_groups, 0); uint32_t real_ntree_limit = ntree_limit * output_groups; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups; if (predts->version > end_version) { CHECK_NE(ntree_limit, 0); this->InitOutPredictions(dmat->Info(), out_preds, model); predts->version = 0; } uint32_t const beg_version = predts->version; CHECK_LE(beg_version, end_version); if (beg_version < end_version) { this->DevicePredictInternal(dmat, out_preds, model, beg_version * output_groups, end_version * output_groups); } uint32_t delta = end_version - beg_version; CHECK_LE(delta, model.trees.size()); predts->Update(delta); CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ || out_preds->Size() == dmat->Info().num_row_); } template <typename Adapter, typename Loader> void DispatchedInplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, uint32_t tree_end) const { auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id); uint32_t const output_groups = model.learner_model_param->num_output_group; DeviceModel d_model; d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id); auto m = dmlc::get<std::shared_ptr<Adapter>>(x); CHECK_EQ(m->NumColumns(), model.learner_model_param->num_feature) << "Number of columns in data must equal to trained model."; CHECK_EQ(this->generic_param_->gpu_id, m->DeviceIdx()) << "XGBoost is running on device: " << this->generic_param_->gpu_id << ", " << "but data is on: " << m->DeviceIdx(); MetaInfo info; info.num_col_ = m->NumColumns(); info.num_row_ = m->NumRows(); this->InitOutPredictions(info, &(out_preds->predictions), model); const uint32_t BLOCK_THREADS = 128; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * m->NumColumns() * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes) { shared_memory_bytes = 0; use_shared = false; } 
size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<Loader, typename Loader::BatchT>, m->Value(), dh::ToSpan(d_model.nodes), out_preds->predictions.DeviceSpan(), dh::ToSpan(d_model.tree_segments), dh::ToSpan(d_model.tree_group), tree_begin, tree_end, m->NumColumns(), info.num_row_, entry_start, use_shared, output_groups); } void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, unsigned tree_end) const override { if (x.type() == typeid(std::shared_ptr<data::CupyAdapter>)) { this->DispatchedInplacePredict< data::CupyAdapter, DeviceAdapterLoader<data::CupyAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else if (x.type() == typeid(std::shared_ptr<data::CudfAdapter>)) { this->DispatchedInplacePredict< data::CudfAdapter, DeviceAdapterLoader<data::CudfAdapterBatch>>( x, model, missing, out_preds, tree_begin, tree_end); } else { LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor."; } } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.learner_model_param->num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(generic_param_->gpu_id); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.learner_model_param->base_score); } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override { Predictor::Configure(cfg); } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device); } } std::mutex lock_; DeviceModel model_; size_t max_shared_memory_bytes_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([](GenericParameter const* generic_param) { return new GPUPredictor(generic_param); }); } // namespace predictor } // namespace xgboost
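SparsePageLoader in the file above stages each thread's row into dynamic shared memory (smem[threadIdx.x * num_features + entry.index] = entry.fvalue) when sizeof(float) * num_features * BLOCK_THREADS fits in the device limit, and otherwise falls back to a binary search over the row's CSR entries. The fragment below sketches only the shared-memory staging path with a dense toy input; the kernel name row_cache_sum and the dense layout are assumptions for illustration, not part of the file.

// Sketch only: dynamic shared memory sized as blockDim.x * num_features floats,
// mirroring the use_shared path of SparsePageLoader above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void row_cache_sum(const float* data, int num_rows, int num_features, float* out) {
  extern __shared__ float smem[];                     // blockDim.x * num_features floats
  int ridx = blockIdx.x * blockDim.x + threadIdx.x;
  if (ridx < num_rows) {
    for (int f = 0; f < num_features; ++f)            // stage this thread's row (dense for simplicity)
      smem[threadIdx.x * num_features + f] = data[ridx * num_features + f];
  }
  __syncthreads();                                    // all threads reach the barrier before any return
  if (ridx >= num_rows) return;
  float sum = 0.f;
  for (int f = 0; f < num_features; ++f)              // later reads hit shared memory, not global
    sum += smem[threadIdx.x * num_features + f];
  out[ridx] = sum;
}

int main() {
  const int num_rows = 3, num_features = 4, threads = 64;
  float h_data[num_rows * num_features];
  for (int i = 0; i < num_rows * num_features; ++i) h_data[i] = (float)i;
  float *d_data, *d_out;
  cudaMalloc((void**)&d_data, sizeof(h_data));
  cudaMalloc((void**)&d_out, num_rows * sizeof(float));
  cudaMemcpy(d_data, h_data, sizeof(h_data), cudaMemcpyHostToDevice);
  size_t shmem = sizeof(float) * threads * num_features; // same sizing rule as shared_memory_bytes above
  int grid = (num_rows + threads - 1) / threads;
  row_cache_sum<<<grid, threads, shmem>>>(d_data, num_rows, num_features, d_out);
  float h_out[num_rows];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < num_rows; ++i) printf("row %d sum = %f\n", i, h_out[i]);
  cudaFree(d_data); cudaFree(d_out);
  return 0;
}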
7ae5570146f15ba23c19cf906c43eddb23f7e295.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 2 #define TC 32 #define C 128 #define N 96 #define H 14 #define W 14 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[8]; __shared__ float pad_temp_shared[32]; __shared__ float kernel_shared[128]; float pad_temp_shared_local[8]; float kernel_shared_local[4]; compute_local[(0)] = 0.000000e+00f; compute_local[(4)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(5)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(6)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; compute_local[(7)] = 0.000000e+00f; for (int rc_outer = 0; rc_outer < 16; ++rc_outer) { for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { __syncthreads(); pad_temp_shared[((((int)threadIdx.z) * 4))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 15))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 1))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? 
data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f); kernel_shared[((((int)threadIdx.z) * 16))] = kernel[(((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)))]; kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 9))]; kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 18))]; kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 27))]; kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 36))]; kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 45))]; kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 54))]; kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 63))]; kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1152))]; kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1161))]; kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1170))]; kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1179))]; kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1188))]; kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1197))]; kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1206))]; kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1215))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(0)]; pad_temp_shared_local[(1)] = pad_temp_shared[(1)]; pad_temp_shared_local[(2)] = pad_temp_shared[(2)]; pad_temp_shared_local[(3)] = pad_temp_shared[(3)]; pad_temp_shared_local[(4)] = pad_temp_shared[(4)]; pad_temp_shared_local[(5)] = pad_temp_shared[(5)]; pad_temp_shared_local[(6)] = pad_temp_shared[(6)]; pad_temp_shared_local[(7)] = pad_temp_shared[(7)]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 8) + 65))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(8)]; pad_temp_shared_local[(1)] = pad_temp_shared[(9)]; pad_temp_shared_local[(2)] = pad_temp_shared[(10)]; pad_temp_shared_local[(3)] = pad_temp_shared[(11)]; pad_temp_shared_local[(4)] = pad_temp_shared[(12)]; pad_temp_shared_local[(5)] = pad_temp_shared[(13)]; pad_temp_shared_local[(6)] = pad_temp_shared[(14)]; pad_temp_shared_local[(7)] = pad_temp_shared[(15)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + 
(pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(16)]; pad_temp_shared_local[(1)] = pad_temp_shared[(17)]; pad_temp_shared_local[(2)] = pad_temp_shared[(18)]; pad_temp_shared_local[(3)] = pad_temp_shared[(19)]; pad_temp_shared_local[(4)] = pad_temp_shared[(20)]; pad_temp_shared_local[(5)] = pad_temp_shared[(21)]; pad_temp_shared_local[(6)] = pad_temp_shared[(22)]; pad_temp_shared_local[(7)] = pad_temp_shared[(23)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(24)]; pad_temp_shared_local[(1)] = pad_temp_shared[(25)]; pad_temp_shared_local[(2)] = pad_temp_shared[(26)]; pad_temp_shared_local[(3)] = pad_temp_shared[(27)]; pad_temp_shared_local[(4)] = pad_temp_shared[(28)]; pad_temp_shared_local[(5)] = pad_temp_shared[(29)]; pad_temp_shared_local[(6)] = pad_temp_shared[(30)]; pad_temp_shared_local[(7)] = pad_temp_shared[(31)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))]; compute_local[(0)] = 
(compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); __syncthreads(); pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? 
data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f); kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1))]; kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 10))]; kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 19))]; kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 28))]; kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 37))]; kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 46))]; kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 55))]; kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 64))]; kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1153))]; kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1162))]; kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1171))]; kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1180))]; kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1189))]; kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1198))]; kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1207))]; kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1216))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(0)]; pad_temp_shared_local[(1)] = pad_temp_shared[(1)]; pad_temp_shared_local[(2)] = pad_temp_shared[(2)]; pad_temp_shared_local[(3)] = pad_temp_shared[(3)]; pad_temp_shared_local[(4)] = pad_temp_shared[(4)]; pad_temp_shared_local[(5)] = pad_temp_shared[(5)]; pad_temp_shared_local[(6)] = pad_temp_shared[(6)]; pad_temp_shared_local[(7)] = pad_temp_shared[(7)]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 8) + 65))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(8)]; pad_temp_shared_local[(1)] = pad_temp_shared[(9)]; pad_temp_shared_local[(2)] = pad_temp_shared[(10)]; pad_temp_shared_local[(3)] = pad_temp_shared[(11)]; pad_temp_shared_local[(4)] = pad_temp_shared[(12)]; pad_temp_shared_local[(5)] = pad_temp_shared[(13)]; pad_temp_shared_local[(6)] = pad_temp_shared[(14)]; pad_temp_shared_local[(7)] = pad_temp_shared[(15)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + 
(pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(16)]; pad_temp_shared_local[(1)] = pad_temp_shared[(17)]; pad_temp_shared_local[(2)] = pad_temp_shared[(18)]; pad_temp_shared_local[(3)] = pad_temp_shared[(19)]; pad_temp_shared_local[(4)] = pad_temp_shared[(20)]; pad_temp_shared_local[(5)] = pad_temp_shared[(21)]; pad_temp_shared_local[(6)] = pad_temp_shared[(22)]; pad_temp_shared_local[(7)] = pad_temp_shared[(23)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(24)]; pad_temp_shared_local[(1)] = pad_temp_shared[(25)]; pad_temp_shared_local[(2)] = pad_temp_shared[(26)]; pad_temp_shared_local[(3)] = pad_temp_shared[(27)]; pad_temp_shared_local[(4)] = pad_temp_shared[(28)]; pad_temp_shared_local[(5)] = pad_temp_shared[(29)]; pad_temp_shared_local[(6)] = pad_temp_shared[(30)]; pad_temp_shared_local[(7)] = pad_temp_shared[(31)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))]; compute_local[(0)] = 
(compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); __syncthreads(); pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (((int)blockIdx.x) < 6)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 12))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (((int)blockIdx.x) < 6)) ? 
data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 2))] : 0.000000e+00f); kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 2))]; kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 11))]; kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 20))]; kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 29))]; kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 38))]; kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 47))]; kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 56))]; kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 65))]; kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1154))]; kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1163))]; kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1172))]; kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1181))]; kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1190))]; kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1199))]; kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1208))]; kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1217))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(0)]; pad_temp_shared_local[(1)] = pad_temp_shared[(1)]; pad_temp_shared_local[(2)] = pad_temp_shared[(2)]; pad_temp_shared_local[(3)] = pad_temp_shared[(3)]; pad_temp_shared_local[(4)] = pad_temp_shared[(4)]; pad_temp_shared_local[(5)] = pad_temp_shared[(5)]; pad_temp_shared_local[(6)] = pad_temp_shared[(6)]; pad_temp_shared_local[(7)] = pad_temp_shared[(7)]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 8) + 65))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(8)]; pad_temp_shared_local[(1)] = pad_temp_shared[(9)]; pad_temp_shared_local[(2)] = pad_temp_shared[(10)]; pad_temp_shared_local[(3)] = pad_temp_shared[(11)]; pad_temp_shared_local[(4)] = pad_temp_shared[(12)]; pad_temp_shared_local[(5)] = pad_temp_shared[(13)]; pad_temp_shared_local[(6)] = pad_temp_shared[(14)]; pad_temp_shared_local[(7)] = pad_temp_shared[(15)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + 
(pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(16)]; pad_temp_shared_local[(1)] = pad_temp_shared[(17)]; pad_temp_shared_local[(2)] = pad_temp_shared[(18)]; pad_temp_shared_local[(3)] = pad_temp_shared[(19)]; pad_temp_shared_local[(4)] = pad_temp_shared[(20)]; pad_temp_shared_local[(5)] = pad_temp_shared[(21)]; pad_temp_shared_local[(6)] = pad_temp_shared[(22)]; pad_temp_shared_local[(7)] = pad_temp_shared[(23)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(24)]; pad_temp_shared_local[(1)] = pad_temp_shared[(25)]; pad_temp_shared_local[(2)] = pad_temp_shared[(26)]; pad_temp_shared_local[(3)] = pad_temp_shared[(27)]; pad_temp_shared_local[(4)] = pad_temp_shared[(28)]; pad_temp_shared_local[(5)] = pad_temp_shared[(29)]; pad_temp_shared_local[(6)] = pad_temp_shared[(30)]; pad_temp_shared_local[(7)] = pad_temp_shared[(31)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))]; compute_local[(0)] = 
(compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); } } compute[(((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)))] = compute_local[(0)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1568))] = compute_local[(4)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1))] = compute_local[(1)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1569))] = compute_local[(5)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 14))] = compute_local[(2)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1582))] = compute_local[(6)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 15))] = compute_local[(3)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1583))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, 
/*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, 
/*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int 
                                            h_offset, unsigned int c_start,
                                            unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
    switch(h_offset){
        case 0:
            for(unsigned int c = warp_id; c<TC; c+=TWS){
                for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
                    unsigned int r = i/W;
                    unsigned int s = i%W;
                    shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
                }
            }
            break;
        case 1:
            for(unsigned int c = warp_id; c<TC; c+=TWS){
                for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
                    unsigned int r = i/W;
                    unsigned int s = i%W;
                    shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
                }
            }
            break;
    }
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
    switch(write_h){
        case 1:
            switch(write_w){
                case 1:
                    #pragma unroll
                    for (unsigned int th = 0; th < 1; ++th) {
                        #pragma unroll
                        for (unsigned int tw = 0; tw < 1; ++tw) {
                            atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
                        }
                    }
                    break;
                case 2:
                    #pragma unroll
                    for (unsigned int th = 0; th < 1; ++th) {
                        #pragma unroll
                        for (unsigned int tw = 0; tw < 2; ++tw) {
                            atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
                        }
                    }
                    break;
            }
            break;
    }
}
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
    extern __shared__ float shared_input[];
    const unsigned int tile_id = blockIdx.x;
    const unsigned int tc_id = tile_id / THS;
    const unsigned int th_id = tile_id % THS;
    const unsigned int tw_id = threadIdx.x / N;
    const int h_out_start = th_id * TH;
    const int w_out_start = tw_id * TW;
    const unsigned int warp_id = tw_id;
    const unsigned int lane_id = threadIdx.x % N;
    float data_array[9];
    float temp_result[TH*TW] = {0.0f};
    for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
        shared_input[i] = 0.0f;
    }
    unsigned int n = lane_id;
    unsigned int c_offset = tc_id * TC;
    int h_offset = (h_out_start == 0)?1:0;
    int h_padded_start = h_out_start;
    int h_padded_end = min(h_padded_start + TH + 2, H + 2);
    int h_non_padded_start = max(h_out_start - 1, 0);
    int h_non_padded_end = min(H, h_padded_end - 1);
    __syncthreads();
    load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
    __syncthreads();
    #pragma unroll
    for(unsigned int c=0;c<TC;c++){
        #pragma unroll
        for(unsigned int r=0;r<R;++r){
            #pragma unroll
            for(unsigned int s=0;s<S;++s){
                data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
            }
        }
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
        temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
        temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
    }
    switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
    float diff = 0.0f;
    #pragma omp parallel for reduction(+ : diff)
    for(unsigned int i=0;i<size;++i){
        diff += abs(x[i] - y[i]);
    }
    return diff;
}
int main(void){
    float *input = new float[C*H*W];
    time_t t;
    float *matrix;
    hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
    hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
    srand((unsigned) time(&t));
    for(int i =0;i<C*H*W;++i){
        input[i] = rand() % 10;
    }
    float *device_input;
    hipMalloc(&device_input,C*H*W*sizeof(float));
    hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
    float *K = new float[C*N*9];
    for(int i=0;i<C*N*9;++i){
        K[i] = 1.0f;
    }
    ConvGemm convGemm;
    convGemm.initialize();
    ConvWinogradeNon convWinogradeNon;
    convWinogradeNon.initialize();
    ConvFFT convFFT;
    convFFT.initialize();
    float *out_cudnn;
    float *out_cudnn_host = new float[N*H*W];
    hipEvent_t event_start;
    hipEvent_t event_stop;
    hipEventCreate(&event_start);
    hipEventCreate(&event_stop);
    out_cudnn = convGemm.forward(device_input);
    hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
    out_cudnn = convFFT.forward(device_input);
    out_cudnn = convWinogradeNon.forward(device_input);
    float *device_K;
    float *device_out;
    hipMalloc(&device_out,H*W*N*sizeof(float));
    hipMemset(device_out,0,H*W*N*sizeof(float));
    hipMalloc(&device_K,C*N*9*sizeof(float));
    hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
    hipEventRecord(event_start);
    convGemm.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnGemmTime;
    hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
    hipEventRecord(event_start);
    convWinogradeNon.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnWinogradeTimeNon;
    hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
    hipEventRecord(event_start);
    convFFT.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnFFTTime;
    hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
    dim3 grid(7,7,6);
    dim3 block(1,1,8);
    hipEventRecord(event_start);
    hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float time_tvm;
    hipEventElapsedTime(&time_tvm, event_start, event_stop);
    float *out_tvm = new float[N*H*W];
    hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
    hipMemset(device_out, 0, sizeof(float)*N*H*W);
    chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
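    // conv2d (launched below) is tiled with TCS*THS blocks of N*TWS threads and uses
    // TC*(TH+2)*WPAD floats of dynamic shared memory per block -- the same byte count
    // (TC*(TH+2)*WPAD*4) passed to hipFuncSetAttribute above. With the constants defined
    // earlier in this file (TC=32, TH=1, TW=2, W=14 => TWS=7, WPAD=16, TCS=4, THS=14)
    // that works out to 56 blocks of 672 threads and 6144 bytes of shared memory per block.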
hipEventRecord(event_start); hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tdc; hipEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_tvm, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
7ae5570146f15ba23c19cf906c43eddb23f7e295.cu
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 2
#define TC 32
#define C 128
#define N 96
#define H 14
#define W 14
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression)                               \
  {                                                          \
    cudnnStatus_t status = (expression);                     \
    if (status != CUDNN_STATUS_SUCCESS) {                    \
      std::cerr << "Error on line " << __LINE__ << ": "      \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                               \
    }                                                        \
  }
inline void chkerr(cudaError_t code) {
    if (code != cudaSuccess) {
        std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
        exit(-1);
    }
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
  float compute_local[8];
  __shared__ float pad_temp_shared[32];
  __shared__ float kernel_shared[128];
  float pad_temp_shared_local[8];
  float kernel_shared_local[4];
  compute_local[(0)] = 0.000000e+00f;
  compute_local[(4)] = 0.000000e+00f;
  compute_local[(1)] = 0.000000e+00f;
  compute_local[(5)] = 0.000000e+00f;
  compute_local[(2)] = 0.000000e+00f;
  compute_local[(6)] = 0.000000e+00f;
  compute_local[(3)] = 0.000000e+00f;
  compute_local[(7)] = 0.000000e+00f;
  for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {
    for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
      __syncthreads();
      pad_temp_shared[((((int)threadIdx.z) * 4))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 15))] : 0.000000e+00f);
      pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f);
      pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (1 <= ((int)blockIdx.x))) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 1))] : 0.000000e+00f);
      pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ?
data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f); kernel_shared[((((int)threadIdx.z) * 16))] = kernel[(((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)))]; kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 9))]; kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 18))]; kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 27))]; kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 36))]; kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 45))]; kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 54))]; kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 63))]; kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1152))]; kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1161))]; kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1170))]; kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1179))]; kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1188))]; kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1197))]; kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1206))]; kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1215))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(0)]; pad_temp_shared_local[(1)] = pad_temp_shared[(1)]; pad_temp_shared_local[(2)] = pad_temp_shared[(2)]; pad_temp_shared_local[(3)] = pad_temp_shared[(3)]; pad_temp_shared_local[(4)] = pad_temp_shared[(4)]; pad_temp_shared_local[(5)] = pad_temp_shared[(5)]; pad_temp_shared_local[(6)] = pad_temp_shared[(6)]; pad_temp_shared_local[(7)] = pad_temp_shared[(7)]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 8) + 65))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(8)]; pad_temp_shared_local[(1)] = pad_temp_shared[(9)]; pad_temp_shared_local[(2)] = pad_temp_shared[(10)]; pad_temp_shared_local[(3)] = pad_temp_shared[(11)]; pad_temp_shared_local[(4)] = pad_temp_shared[(12)]; pad_temp_shared_local[(5)] = pad_temp_shared[(13)]; pad_temp_shared_local[(6)] = pad_temp_shared[(14)]; pad_temp_shared_local[(7)] = pad_temp_shared[(15)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + 
(pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(16)]; pad_temp_shared_local[(1)] = pad_temp_shared[(17)]; pad_temp_shared_local[(2)] = pad_temp_shared[(18)]; pad_temp_shared_local[(3)] = pad_temp_shared[(19)]; pad_temp_shared_local[(4)] = pad_temp_shared[(20)]; pad_temp_shared_local[(5)] = pad_temp_shared[(21)]; pad_temp_shared_local[(6)] = pad_temp_shared[(22)]; pad_temp_shared_local[(7)] = pad_temp_shared[(23)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(24)]; pad_temp_shared_local[(1)] = pad_temp_shared[(25)]; pad_temp_shared_local[(2)] = pad_temp_shared[(26)]; pad_temp_shared_local[(3)] = pad_temp_shared[(27)]; pad_temp_shared_local[(4)] = pad_temp_shared[(28)]; pad_temp_shared_local[(5)] = pad_temp_shared[(29)]; pad_temp_shared_local[(6)] = pad_temp_shared[(30)]; pad_temp_shared_local[(7)] = pad_temp_shared[(31)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))]; compute_local[(0)] = 
(compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); __syncthreads(); pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 14))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? 
data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f); kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1))]; kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 10))]; kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 19))]; kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 28))]; kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 37))]; kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 46))]; kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 55))]; kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 64))]; kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1153))]; kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1162))]; kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1171))]; kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1180))]; kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1189))]; kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1198))]; kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1207))]; kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1216))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(0)]; pad_temp_shared_local[(1)] = pad_temp_shared[(1)]; pad_temp_shared_local[(2)] = pad_temp_shared[(2)]; pad_temp_shared_local[(3)] = pad_temp_shared[(3)]; pad_temp_shared_local[(4)] = pad_temp_shared[(4)]; pad_temp_shared_local[(5)] = pad_temp_shared[(5)]; pad_temp_shared_local[(6)] = pad_temp_shared[(6)]; pad_temp_shared_local[(7)] = pad_temp_shared[(7)]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 8) + 65))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(8)]; pad_temp_shared_local[(1)] = pad_temp_shared[(9)]; pad_temp_shared_local[(2)] = pad_temp_shared[(10)]; pad_temp_shared_local[(3)] = pad_temp_shared[(11)]; pad_temp_shared_local[(4)] = pad_temp_shared[(12)]; pad_temp_shared_local[(5)] = pad_temp_shared[(13)]; pad_temp_shared_local[(6)] = pad_temp_shared[(14)]; pad_temp_shared_local[(7)] = pad_temp_shared[(15)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + 
(pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(16)]; pad_temp_shared_local[(1)] = pad_temp_shared[(17)]; pad_temp_shared_local[(2)] = pad_temp_shared[(18)]; pad_temp_shared_local[(3)] = pad_temp_shared[(19)]; pad_temp_shared_local[(4)] = pad_temp_shared[(20)]; pad_temp_shared_local[(5)] = pad_temp_shared[(21)]; pad_temp_shared_local[(6)] = pad_temp_shared[(22)]; pad_temp_shared_local[(7)] = pad_temp_shared[(23)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(24)]; pad_temp_shared_local[(1)] = pad_temp_shared[(25)]; pad_temp_shared_local[(2)] = pad_temp_shared[(26)]; pad_temp_shared_local[(3)] = pad_temp_shared[(27)]; pad_temp_shared_local[(4)] = pad_temp_shared[(28)]; pad_temp_shared_local[(5)] = pad_temp_shared[(29)]; pad_temp_shared_local[(6)] = pad_temp_shared[(30)]; pad_temp_shared_local[(7)] = pad_temp_shared[(31)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))]; compute_local[(0)] = 
(compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); __syncthreads(); pad_temp_shared[((((int)threadIdx.z) * 4))] = ((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 13))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 1))] = (((1 <= ((((int)blockIdx.y) * 2) + ry_outer)) && (((int)blockIdx.x) < 6)) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) - 12))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 2))] = ((((((int)blockIdx.y) * 2) + ry_outer) < 14) ? data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 1))] : 0.000000e+00f); pad_temp_shared[(((((int)threadIdx.z) * 4) + 3))] = (((((((int)blockIdx.y) * 2) + ry_outer) < 14) && (((int)blockIdx.x) < 6)) ? 
data[(((((((rc_outer * 1568) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (ry_outer * 14)) + (((int)blockIdx.x) * 2)) + 2))] : 0.000000e+00f); kernel_shared[((((int)threadIdx.z) * 16))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 2))]; kernel_shared[(((((int)threadIdx.z) * 16) + 1))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 11))]; kernel_shared[(((((int)threadIdx.z) * 16) + 2))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 20))]; kernel_shared[(((((int)threadIdx.z) * 16) + 3))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 29))]; kernel_shared[(((((int)threadIdx.z) * 16) + 4))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 38))]; kernel_shared[(((((int)threadIdx.z) * 16) + 5))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 47))]; kernel_shared[(((((int)threadIdx.z) * 16) + 6))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 56))]; kernel_shared[(((((int)threadIdx.z) * 16) + 7))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 65))]; kernel_shared[(((((int)threadIdx.z) * 16) + 8))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1154))]; kernel_shared[(((((int)threadIdx.z) * 16) + 9))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1163))]; kernel_shared[(((((int)threadIdx.z) * 16) + 10))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1172))]; kernel_shared[(((((int)threadIdx.z) * 16) + 11))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1181))]; kernel_shared[(((((int)threadIdx.z) * 16) + 12))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1190))]; kernel_shared[(((((int)threadIdx.z) * 16) + 13))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1199))]; kernel_shared[(((((int)threadIdx.z) * 16) + 14))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1208))]; kernel_shared[(((((int)threadIdx.z) * 16) + 15))] = kernel[((((((((int)blockIdx.z) * 18432) + (((int)threadIdx.z) * 2304)) + (rc_outer * 72)) + (ry_outer * 3)) + 1217))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(0)]; pad_temp_shared_local[(1)] = pad_temp_shared[(1)]; pad_temp_shared_local[(2)] = pad_temp_shared[(2)]; pad_temp_shared_local[(3)] = pad_temp_shared[(3)]; pad_temp_shared_local[(4)] = pad_temp_shared[(4)]; pad_temp_shared_local[(5)] = pad_temp_shared[(5)]; pad_temp_shared_local[(6)] = pad_temp_shared[(6)]; pad_temp_shared_local[(7)] = pad_temp_shared[(7)]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 8))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 64))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 1))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 8) + 65))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(8)]; pad_temp_shared_local[(1)] = pad_temp_shared[(9)]; pad_temp_shared_local[(2)] = pad_temp_shared[(10)]; pad_temp_shared_local[(3)] = pad_temp_shared[(11)]; pad_temp_shared_local[(4)] = pad_temp_shared[(12)]; pad_temp_shared_local[(5)] = pad_temp_shared[(13)]; pad_temp_shared_local[(6)] = pad_temp_shared[(14)]; pad_temp_shared_local[(7)] = pad_temp_shared[(15)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 2))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 66))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 3))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 67))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + 
(pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(16)]; pad_temp_shared_local[(1)] = pad_temp_shared[(17)]; pad_temp_shared_local[(2)] = pad_temp_shared[(18)]; pad_temp_shared_local[(3)] = pad_temp_shared[(19)]; pad_temp_shared_local[(4)] = pad_temp_shared[(20)]; pad_temp_shared_local[(5)] = pad_temp_shared[(21)]; pad_temp_shared_local[(6)] = pad_temp_shared[(22)]; pad_temp_shared_local[(7)] = pad_temp_shared[(23)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 4))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 68))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 5))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 69))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); pad_temp_shared_local[(0)] = pad_temp_shared[(24)]; pad_temp_shared_local[(1)] = pad_temp_shared[(25)]; pad_temp_shared_local[(2)] = pad_temp_shared[(26)]; pad_temp_shared_local[(3)] = pad_temp_shared[(27)]; pad_temp_shared_local[(4)] = pad_temp_shared[(28)]; pad_temp_shared_local[(5)] = pad_temp_shared[(29)]; pad_temp_shared_local[(6)] = pad_temp_shared[(30)]; pad_temp_shared_local[(7)] = pad_temp_shared[(31)]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 8) + 6))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 8) + 70))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 8) + 7))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 8) + 71))]; compute_local[(0)] = 
(compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(2)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)])); compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(1)])); compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(1)])); compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(3)])); } } compute[(((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)))] = compute_local[(0)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1568))] = compute_local[(4)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1))] = compute_local[(1)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1569))] = compute_local[(5)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 14))] = compute_local[(2)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1582))] = compute_local[(6)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 15))] = compute_local[(3)]; compute[((((((((int)blockIdx.z) * 3136) + (((int)threadIdx.z) * 196)) + (((int)blockIdx.y) * 28)) + (((int)blockIdx.x) * 2)) + 1583))] = compute_local[(7)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, 
/*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, 
/*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, 
                                           unsigned int h_offset, unsigned int c_start,
                                           unsigned int warp_id, unsigned int lane_id,
                                           unsigned int warp_size) {
    // Copy one tile of the input feature map into padded shared memory.
    // h_offset == 1 means the tile touches the top image border, so the first
    // shared-memory row is left as zero padding.
    switch (h_offset) {
    case 0:
        for (unsigned int c = warp_id; c < TC; c += TWS) {
            for (unsigned int i = lane_id; i < (h_end - h_start) * W; i += warp_size) {
                unsigned int r = i / W;
                unsigned int s = i % W;
                shared_input[c * (TH + 2) * (WPAD) + r * WPAD + s + 1] =
                    input[(c_start + c) * H * W + h_start * W + i];
            }
        }
        break;
    case 1:
        for (unsigned int c = warp_id; c < TC; c += TWS) {
            for (unsigned int i = lane_id; i < (h_end - h_start) * W; i += warp_size) {
                unsigned int r = i / W;
                unsigned int s = i % W;
                shared_input[c * (TH + 2) * (WPAD) + (1 + r) * WPAD + s + 1] =
                    input[(c_start + c) * H * W + h_start * W + i];
            }
        }
        break;
    }
}
// Scatter the per-thread accumulator back to global memory, clipping the tile
// at the right/bottom image borders (write_h / write_w give the number of
// valid rows / columns of the TH x TW tile).
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w,
                                                  unsigned int h_out_start, unsigned int w_out_start,
                                                  unsigned int n, float * outputs, float * temp_result) {
    switch (write_h) {
    case 1:
        switch (write_w) {
        case 1:
#pragma unroll
            for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
                for (unsigned int tw = 0; tw < 1; ++tw) {
                    atomicAdd(&outputs[n * H * W + (h_out_start + th) * W + (w_out_start + tw)],
                              temp_result[(th * TW + tw)]);
                }
            }
            break;
        case 2:
#pragma unroll
            for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
                for (unsigned int tw = 0; tw < 2; ++tw) {
                    atomicAdd(&outputs[n * H * W + (h_out_start + th) * W + (w_out_start + tw)],
                              temp_result[(th * TW + tw)]);
                }
            }
            break;
        }
        break;
    }
}
__global__ void conv2d(float * __restrict__ input, const float * __restrict__ kernel,
                       float * __restrict__ outputs) {
    extern __shared__ float shared_input[];
    const unsigned int tile_id = blockIdx.x;
    const unsigned int tc_id = tile_id / THS;      // channel tile handled by this block
    const unsigned int th_id = tile_id % THS;      // row tile handled by this block
    const unsigned int tw_id = threadIdx.x / N;    // column tile within the block
    const int h_out_start = th_id * TH;
    const int w_out_start = tw_id * TW;
    const unsigned int warp_id = tw_id;
    const unsigned int lane_id = threadIdx.x % N;  // output channel handled by this thread
    float data_array[9];                           // 3x3 kernel cached in registers
    float temp_result[TH * TW] = {0.0f};           // per-thread output tile accumulator
    for (unsigned int i = threadIdx.x; i < TC * (TH + 2) * WPAD; i += blockDim.x) {
        shared_input[i] = 0.0f;
    }
    unsigned int n = lane_id;
    unsigned int c_offset = tc_id * TC;
    int h_offset = (h_out_start == 0) ? 1 : 0;
    int h_padded_start = h_out_start;
    int h_padded_end = min(h_padded_start + TH + 2, H + 2);
    int h_non_padded_start = max(h_out_start - 1, 0);
    int h_non_padded_end = min(H, h_padded_end - 1);
    __syncthreads();
    load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end,
                               h_offset, c_offset, warp_id, lane_id, N);
    __syncthreads();
#pragma unroll
    for (unsigned int c = 0; c < TC; c++) {
#pragma unroll
        for (unsigned int r = 0; r < R; ++r) {
#pragma unroll
            for (unsigned int s = 0; s < S; ++s) {
                data_array[r * S + s] = kernel[(c + c_offset) * N * 9 + r * 3 * N + s * N + n];
            }
        }
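        // Hedged explanatory note (added comment, not in the original source):
        // with TH = 1 and TW = 2 each thread produces a 1x2 strip of outputs, so
        // the fully unrolled 3x3 multiply-accumulate below reads four consecutive
        // padded columns (tw_id*TW + 0..3) in three shared-memory rows and reuses
        // every loaded value for both output columns.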
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 0 * WPAD + tw_id * TW + 0] * data_array[0];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 0 * WPAD + tw_id * TW + 1] * data_array[0];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 0 * WPAD + tw_id * TW + 1] * data_array[1];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 0 * WPAD + tw_id * TW + 2] * data_array[1];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 0 * WPAD + tw_id * TW + 2] * data_array[2];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 0 * WPAD + tw_id * TW + 3] * data_array[2];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 1 * WPAD + tw_id * TW + 0] * data_array[3];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 1 * WPAD + tw_id * TW + 1] * data_array[3];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 1 * WPAD + tw_id * TW + 1] * data_array[4];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 1 * WPAD + tw_id * TW + 2] * data_array[4];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 1 * WPAD + tw_id * TW + 2] * data_array[5];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 1 * WPAD + tw_id * TW + 3] * data_array[5];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 2 * WPAD + tw_id * TW + 0] * data_array[6];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 2 * WPAD + tw_id * TW + 1] * data_array[6];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 2 * WPAD + tw_id * TW + 1] * data_array[7];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 2 * WPAD + tw_id * TW + 2] * data_array[7];
        temp_result[0] += shared_input[c * (TH + 2) * (WPAD) + 2 * WPAD + tw_id * TW + 2] * data_array[8];
        temp_result[1] += shared_input[c * (TH + 2) * (WPAD) + 2 * WPAD + tw_id * TW + 3] * data_array[8];
    }
    switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start),
                      h_out_start, w_out_start, n, outputs, temp_result);
}
// Sum of absolute element-wise differences between two host buffers.
float check_diff(float *x, float *y, unsigned int size) {
    float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
    for (unsigned int i = 0; i < size; ++i) {
        diff += fabs(x[i] - y[i]);  // fabs keeps the comparison in floating point
    }
    return diff;
}
int main(void) {
    float *input = new float[C * H * W];
    time_t t;
    float *matrix;
    cudaMalloc(&matrix, C * (TH + 2) * (TW + 2) * THS * TWS * sizeof(float));
    cudaMemset(matrix, 0, C * (TH + 2) * (TW + 2) * THS * TWS * sizeof(float));
    srand((unsigned) time(&t));
    for (int i = 0; i < C * H * W; ++i) {
        input[i] = rand() % 10;
    }
    float *device_input;
    cudaMalloc(&device_input, C * H * W * sizeof(float));
    cudaMemcpy(device_input, input, C * H * W * sizeof(float), cudaMemcpyHostToDevice);
    float *K = new float[C * N * 9];
    for (int i = 0; i < C * N * 9; ++i) {
        K[i] = 1.0f;
    }
    // cuDNN reference paths: implicit GEMM, non-fused Winograd, and FFT.
    ConvGemm convGemm;
    convGemm.initialize();
    ConvWinogradeNon convWinogradeNon;
    convWinogradeNon.initialize();
    ConvFFT convFFT;
    convFFT.initialize();
    float *out_cudnn;
    float *out_cudnn_host = new float[N * H * W];
    cudaEvent_t event_start;
    cudaEvent_t event_stop;
    cudaEventCreate(&event_start);
    cudaEventCreate(&event_stop);
    // Warm-up runs so the timed iterations below exclude one-time setup cost.
    out_cudnn = convGemm.forward(device_input);
    cudaMemcpy(out_cudnn_host, out_cudnn, N * H * W * sizeof(float), cudaMemcpyDeviceToHost);
    out_cudnn = convFFT.forward(device_input);
    out_cudnn = convWinogradeNon.forward(device_input);
    float *device_K;
    float *device_out;
    cudaMalloc(&device_out, H * W * N * sizeof(float));
    cudaMemset(device_out, 0, H * W * N * sizeof(float));
    cudaMalloc(&device_K, C * N * 9 * sizeof(float));
    cudaMemcpy(device_K, K, C * N * 9 * sizeof(float), cudaMemcpyHostToDevice);
    // Timed cuDNN implicit-GEMM convolution.
    cudaEventRecord(event_start);
    convGemm.forward(device_input);
    cudaEventRecord(event_stop);
    cudaEventSynchronize(event_stop);
    float cudnnGemmTime;
    cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
    // Timed cuDNN non-fused Winograd convolution.
    cudaEventRecord(event_start);
    convWinogradeNon.forward(device_input);
    cudaEventRecord(event_stop);
    cudaEventSynchronize(event_stop);
    float cudnnWinogradeTimeNon;
    cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
    // Timed cuDNN FFT convolution.
    cudaEventRecord(event_start);
    convFFT.forward(device_input);
    cudaEventRecord(event_stop);
    cudaEventSynchronize(event_stop);
    float cudnnFFTTime;
    cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
    // Timed TVM-generated kernel.
    dim3 grid(7, 7, 6);
    dim3 block(1, 1, 8);
    cudaEventRecord(event_start);
    default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
    cudaEventRecord(event_stop);
    cudaEventSynchronize(event_stop);
    float time_tvm;
    cudaEventElapsedTime(&time_tvm, event_start, event_stop);
    float *out_tvm = new float[N * H * W];
    cudaMemcpy(out_tvm, device_out, N * H * W * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemset(device_out, 0, sizeof(float) * N * H * W);
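    // Hedged explanatory note (added comment): the dynamic shared-memory size
    // requested below is one padded input tile per block,
    //   TC channels * (TH + 2) padded rows * WPAD padded columns * 4 bytes per float,
    // and the same expression is passed as the third launch parameter of conv2d.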
    chkerr(cudaFuncSetAttribute(conv2d, cudaFuncAttributeMaxDynamicSharedMemorySize,
                                TC * (TH + 2) * (WPAD) * 4));
    cudaEventRecord(event_start);
    conv2d<<<TCS * THS, N * TWS, TC * (TH + 2) * (WPAD) * 4>>>(device_input, device_K, device_out);
    cudaEventRecord(event_stop);
    cudaEventSynchronize(event_stop);
    float time_tdc;
    cudaEventElapsedTime(&time_tdc, event_start, event_stop);
    float *out_tdc = new float[N * H * W];
    cudaMemcpy(out_tdc, device_out, N * H * W * sizeof(float), cudaMemcpyDeviceToHost);
    // Append one CSV row: sizes, the four raw timings, and the speedups over the TDC kernel.
    ofstream outfile;
    char buffer[1000];
    int ret = sprintf(buffer, "%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", N, C, H, W,
                      cudnnFFTTime, cudnnWinogradeTimeNon, cudnnGemmTime, time_tvm, time_tdc,
                      cudnnFFTTime / time_tdc, cudnnWinogradeTimeNon / time_tdc,
                      cudnnGemmTime / time_tdc, time_tvm / time_tdc);
    outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
    outfile << buffer;
    // Sanity check between the TVM result and the TDC kernel result; the value is
    // computed but, as in the original code, not included in the printed row.
    float difference = check_diff(out_tvm, out_tdc, N * H * W);
    cout << N << "," << C << "," << H << "," << W << "," << cudnnFFTTime << ","
         << cudnnWinogradeTimeNon << "," << cudnnGemmTime << "," << time_tvm << ","
         << time_tdc << "," << cudnnFFTTime / time_tdc << "," << cudnnWinogradeTimeNon / time_tdc
         << "," << cudnnGemmTime / time_tdc << "," << time_tvm / time_tdc << endl;
    return 0;
}
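// ---------------------------------------------------------------------------
// Hedged sketch (added by the editor, not part of the original benchmark):
// main() above exits without releasing its device buffers or CUDA events.
// A cleanup helper along these lines could be called just before `return 0;`.
// The function name and parameter list are illustrative assumptions; it is
// defined here but never invoked.
// ---------------------------------------------------------------------------
static void release_benchmark_resources(float *device_input, float *device_K,
                                        float *device_out, float *matrix,
                                        cudaEvent_t event_start, cudaEvent_t event_stop) {
    cudaFree(device_input);         // device-side input tensor
    cudaFree(device_K);             // device-side 3x3 kernel weights
    cudaFree(device_out);           // device-side output buffer
    cudaFree(matrix);               // scratch buffer allocated at the top of main()
    cudaEventDestroy(event_start);  // timing events
    cudaEventDestroy(event_stop);
}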